1 /* 2 * Block driver for the QCOW version 2 format 3 * 4 * Copyright (c) 2004-2006 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25 #include "qemu/osdep.h" 26 27 #define ZLIB_CONST 28 #include <zlib.h> 29 30 #include "block/block_int.h" 31 #include "block/qdict.h" 32 #include "sysemu/block-backend.h" 33 #include "qemu/module.h" 34 #include "qcow2.h" 35 #include "qemu/error-report.h" 36 #include "qapi/error.h" 37 #include "qapi/qapi-events-block-core.h" 38 #include "qapi/qmp/qdict.h" 39 #include "qapi/qmp/qstring.h" 40 #include "trace.h" 41 #include "qemu/option_int.h" 42 #include "qemu/cutils.h" 43 #include "qemu/bswap.h" 44 #include "qapi/qobject-input-visitor.h" 45 #include "qapi/qapi-visit-block-core.h" 46 #include "crypto.h" 47 #include "block/thread-pool.h" 48 49 /* 50 Differences with QCOW: 51 52 - Support for multiple incremental snapshots. 53 - Memory management by reference counts. 54 - Clusters which have a reference count of one have the bit 55 QCOW_OFLAG_COPIED to optimize write performance. 56 - Size of compressed clusters is stored in sectors to reduce bit usage 57 in the cluster offsets. 58 - Support for storing additional data (such as the VM state) in the 59 snapshots. 60 - If a backing store is used, the cluster size is not constrained 61 (could be backported to QCOW). 62 - L2 tables have always a size of one cluster. 63 */ 64 65 66 typedef struct { 67 uint32_t magic; 68 uint32_t len; 69 } QEMU_PACKED QCowExtension; 70 71 #define QCOW2_EXT_MAGIC_END 0 72 #define QCOW2_EXT_MAGIC_BACKING_FORMAT 0xE2792ACA 73 #define QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857 74 #define QCOW2_EXT_MAGIC_CRYPTO_HEADER 0x0537be77 75 #define QCOW2_EXT_MAGIC_BITMAPS 0x23852875 76 77 static int coroutine_fn 78 qcow2_co_preadv_compressed(BlockDriverState *bs, 79 uint64_t file_cluster_offset, 80 uint64_t offset, 81 uint64_t bytes, 82 QEMUIOVector *qiov); 83 84 static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename) 85 { 86 const QCowHeader *cow_header = (const void *)buf; 87 88 if (buf_size >= sizeof(QCowHeader) && 89 be32_to_cpu(cow_header->magic) == QCOW_MAGIC && 90 be32_to_cpu(cow_header->version) >= 2) 91 return 100; 92 else 93 return 0; 94 } 95 96 97 static ssize_t qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset, 98 uint8_t *buf, size_t buflen, 99 void *opaque, Error **errp) 100 { 101 BlockDriverState *bs = opaque; 102 BDRVQcow2State *s = bs->opaque; 103 ssize_t ret; 104 105 if ((offset + buflen) > s->crypto_header.length) { 106 error_setg(errp, "Request for data outside of extension header"); 107 return -1; 108 } 109 110 ret = bdrv_pread(bs->file, 111 s->crypto_header.offset + offset, buf, buflen); 112 if (ret < 0) { 113 error_setg_errno(errp, -ret, "Could not read encryption header"); 114 return -1; 115 } 116 return ret; 117 } 118 119 120 static ssize_t qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen, 121 void *opaque, Error **errp) 122 { 123 BlockDriverState *bs = opaque; 124 BDRVQcow2State *s = bs->opaque; 125 int64_t ret; 126 int64_t clusterlen; 127 128 ret = qcow2_alloc_clusters(bs, headerlen); 129 if (ret < 0) { 130 error_setg_errno(errp, -ret, 131 "Cannot allocate cluster for LUKS header size %zu", 132 headerlen); 133 return -1; 134 } 135 136 s->crypto_header.length = headerlen; 137 s->crypto_header.offset = ret; 138 139 /* Zero fill remaining space in cluster so it has predictable 140 * content in case of future spec changes */ 141 clusterlen = size_to_clusters(s, headerlen) * s->cluster_size; 142 assert(qcow2_pre_write_overlap_check(bs, 0, ret, clusterlen) == 0); 143 ret = bdrv_pwrite_zeroes(bs->file, 144 ret + headerlen, 145 clusterlen - headerlen, 0); 146 if (ret < 0) { 147 error_setg_errno(errp, -ret, "Could not zero fill encryption header"); 148 return -1; 149 } 150 151 return ret; 152 } 153 154 155 static ssize_t qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset, 156 const uint8_t *buf, size_t buflen, 157 void *opaque, Error **errp) 158 { 159 BlockDriverState *bs = opaque; 160 BDRVQcow2State *s = bs->opaque; 161 ssize_t ret; 162 163 if ((offset + buflen) > s->crypto_header.length) { 164 error_setg(errp, "Request for data outside of extension header"); 165 return -1; 166 } 167 168 ret = bdrv_pwrite(bs->file, 169 s->crypto_header.offset + offset, buf, buflen); 170 if (ret < 0) { 171 error_setg_errno(errp, -ret, "Could not read encryption header"); 172 return -1; 173 } 174 return ret; 175 } 176 177 178 /* 179 * read qcow2 extension and fill bs 180 * start reading from start_offset 181 * finish reading upon magic of value 0 or when end_offset reached 182 * unknown magic is skipped (future extension this version knows nothing about) 183 * return 0 upon success, non-0 otherwise 184 */ 185 static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, 186 uint64_t end_offset, void **p_feature_table, 187 int flags, bool *need_update_header, 188 Error **errp) 189 { 190 BDRVQcow2State *s = bs->opaque; 191 QCowExtension ext; 192 uint64_t offset; 193 int ret; 194 Qcow2BitmapHeaderExt bitmaps_ext; 195 196 if (need_update_header != NULL) { 197 *need_update_header = false; 198 } 199 200 #ifdef DEBUG_EXT 201 printf("qcow2_read_extensions: start=%ld end=%ld\n", start_offset, end_offset); 202 #endif 203 offset = start_offset; 204 while (offset < end_offset) { 205 206 #ifdef DEBUG_EXT 207 /* Sanity check */ 208 if (offset > s->cluster_size) 209 printf("qcow2_read_extension: suspicious offset %lu\n", offset); 210 211 printf("attempting to read extended header in offset %lu\n", offset); 212 #endif 213 214 ret = bdrv_pread(bs->file, offset, &ext, sizeof(ext)); 215 if (ret < 0) { 216 error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: " 217 "pread fail from offset %" PRIu64, offset); 218 return 1; 219 } 220 ext.magic = be32_to_cpu(ext.magic); 221 ext.len = be32_to_cpu(ext.len); 222 offset += sizeof(ext); 223 #ifdef DEBUG_EXT 224 printf("ext.magic = 0x%x\n", ext.magic); 225 #endif 226 if (offset > end_offset || ext.len > end_offset - offset) { 227 error_setg(errp, "Header extension too large"); 228 return -EINVAL; 229 } 230 231 switch (ext.magic) { 232 case QCOW2_EXT_MAGIC_END: 233 return 0; 234 235 case QCOW2_EXT_MAGIC_BACKING_FORMAT: 236 if (ext.len >= sizeof(bs->backing_format)) { 237 error_setg(errp, "ERROR: ext_backing_format: len=%" PRIu32 238 " too large (>=%zu)", ext.len, 239 sizeof(bs->backing_format)); 240 return 2; 241 } 242 ret = bdrv_pread(bs->file, offset, bs->backing_format, ext.len); 243 if (ret < 0) { 244 error_setg_errno(errp, -ret, "ERROR: ext_backing_format: " 245 "Could not read format name"); 246 return 3; 247 } 248 bs->backing_format[ext.len] = '\0'; 249 s->image_backing_format = g_strdup(bs->backing_format); 250 #ifdef DEBUG_EXT 251 printf("Qcow2: Got format extension %s\n", bs->backing_format); 252 #endif 253 break; 254 255 case QCOW2_EXT_MAGIC_FEATURE_TABLE: 256 if (p_feature_table != NULL) { 257 void* feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature)); 258 ret = bdrv_pread(bs->file, offset , feature_table, ext.len); 259 if (ret < 0) { 260 error_setg_errno(errp, -ret, "ERROR: ext_feature_table: " 261 "Could not read table"); 262 return ret; 263 } 264 265 *p_feature_table = feature_table; 266 } 267 break; 268 269 case QCOW2_EXT_MAGIC_CRYPTO_HEADER: { 270 unsigned int cflags = 0; 271 if (s->crypt_method_header != QCOW_CRYPT_LUKS) { 272 error_setg(errp, "CRYPTO header extension only " 273 "expected with LUKS encryption method"); 274 return -EINVAL; 275 } 276 if (ext.len != sizeof(Qcow2CryptoHeaderExtension)) { 277 error_setg(errp, "CRYPTO header extension size %u, " 278 "but expected size %zu", ext.len, 279 sizeof(Qcow2CryptoHeaderExtension)); 280 return -EINVAL; 281 } 282 283 ret = bdrv_pread(bs->file, offset, &s->crypto_header, ext.len); 284 if (ret < 0) { 285 error_setg_errno(errp, -ret, 286 "Unable to read CRYPTO header extension"); 287 return ret; 288 } 289 s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset); 290 s->crypto_header.length = be64_to_cpu(s->crypto_header.length); 291 292 if ((s->crypto_header.offset % s->cluster_size) != 0) { 293 error_setg(errp, "Encryption header offset '%" PRIu64 "' is " 294 "not a multiple of cluster size '%u'", 295 s->crypto_header.offset, s->cluster_size); 296 return -EINVAL; 297 } 298 299 if (flags & BDRV_O_NO_IO) { 300 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO; 301 } 302 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.", 303 qcow2_crypto_hdr_read_func, 304 bs, cflags, 1, errp); 305 if (!s->crypto) { 306 return -EINVAL; 307 } 308 } break; 309 310 case QCOW2_EXT_MAGIC_BITMAPS: 311 if (ext.len != sizeof(bitmaps_ext)) { 312 error_setg_errno(errp, -ret, "bitmaps_ext: " 313 "Invalid extension length"); 314 return -EINVAL; 315 } 316 317 if (!(s->autoclear_features & QCOW2_AUTOCLEAR_BITMAPS)) { 318 if (s->qcow_version < 3) { 319 /* Let's be a bit more specific */ 320 warn_report("This qcow2 v2 image contains bitmaps, but " 321 "they may have been modified by a program " 322 "without persistent bitmap support; so now " 323 "they must all be considered inconsistent"); 324 } else { 325 warn_report("a program lacking bitmap support " 326 "modified this file, so all bitmaps are now " 327 "considered inconsistent"); 328 } 329 error_printf("Some clusters may be leaked, " 330 "run 'qemu-img check -r' on the image " 331 "file to fix."); 332 if (need_update_header != NULL) { 333 /* Updating is needed to drop invalid bitmap extension. */ 334 *need_update_header = true; 335 } 336 break; 337 } 338 339 ret = bdrv_pread(bs->file, offset, &bitmaps_ext, ext.len); 340 if (ret < 0) { 341 error_setg_errno(errp, -ret, "bitmaps_ext: " 342 "Could not read ext header"); 343 return ret; 344 } 345 346 if (bitmaps_ext.reserved32 != 0) { 347 error_setg_errno(errp, -ret, "bitmaps_ext: " 348 "Reserved field is not zero"); 349 return -EINVAL; 350 } 351 352 bitmaps_ext.nb_bitmaps = be32_to_cpu(bitmaps_ext.nb_bitmaps); 353 bitmaps_ext.bitmap_directory_size = 354 be64_to_cpu(bitmaps_ext.bitmap_directory_size); 355 bitmaps_ext.bitmap_directory_offset = 356 be64_to_cpu(bitmaps_ext.bitmap_directory_offset); 357 358 if (bitmaps_ext.nb_bitmaps > QCOW2_MAX_BITMAPS) { 359 error_setg(errp, 360 "bitmaps_ext: Image has %" PRIu32 " bitmaps, " 361 "exceeding the QEMU supported maximum of %d", 362 bitmaps_ext.nb_bitmaps, QCOW2_MAX_BITMAPS); 363 return -EINVAL; 364 } 365 366 if (bitmaps_ext.nb_bitmaps == 0) { 367 error_setg(errp, "found bitmaps extension with zero bitmaps"); 368 return -EINVAL; 369 } 370 371 if (bitmaps_ext.bitmap_directory_offset & (s->cluster_size - 1)) { 372 error_setg(errp, "bitmaps_ext: " 373 "invalid bitmap directory offset"); 374 return -EINVAL; 375 } 376 377 if (bitmaps_ext.bitmap_directory_size > 378 QCOW2_MAX_BITMAP_DIRECTORY_SIZE) { 379 error_setg(errp, "bitmaps_ext: " 380 "bitmap directory size (%" PRIu64 ") exceeds " 381 "the maximum supported size (%d)", 382 bitmaps_ext.bitmap_directory_size, 383 QCOW2_MAX_BITMAP_DIRECTORY_SIZE); 384 return -EINVAL; 385 } 386 387 s->nb_bitmaps = bitmaps_ext.nb_bitmaps; 388 s->bitmap_directory_offset = 389 bitmaps_ext.bitmap_directory_offset; 390 s->bitmap_directory_size = 391 bitmaps_ext.bitmap_directory_size; 392 393 #ifdef DEBUG_EXT 394 printf("Qcow2: Got bitmaps extension: " 395 "offset=%" PRIu64 " nb_bitmaps=%" PRIu32 "\n", 396 s->bitmap_directory_offset, s->nb_bitmaps); 397 #endif 398 break; 399 400 default: 401 /* unknown magic - save it in case we need to rewrite the header */ 402 /* If you add a new feature, make sure to also update the fast 403 * path of qcow2_make_empty() to deal with it. */ 404 { 405 Qcow2UnknownHeaderExtension *uext; 406 407 uext = g_malloc0(sizeof(*uext) + ext.len); 408 uext->magic = ext.magic; 409 uext->len = ext.len; 410 QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next); 411 412 ret = bdrv_pread(bs->file, offset , uext->data, uext->len); 413 if (ret < 0) { 414 error_setg_errno(errp, -ret, "ERROR: unknown extension: " 415 "Could not read data"); 416 return ret; 417 } 418 } 419 break; 420 } 421 422 offset += ((ext.len + 7) & ~7); 423 } 424 425 return 0; 426 } 427 428 static void cleanup_unknown_header_ext(BlockDriverState *bs) 429 { 430 BDRVQcow2State *s = bs->opaque; 431 Qcow2UnknownHeaderExtension *uext, *next; 432 433 QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) { 434 QLIST_REMOVE(uext, next); 435 g_free(uext); 436 } 437 } 438 439 static void report_unsupported_feature(Error **errp, Qcow2Feature *table, 440 uint64_t mask) 441 { 442 char *features = g_strdup(""); 443 char *old; 444 445 while (table && table->name[0] != '\0') { 446 if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) { 447 if (mask & (1ULL << table->bit)) { 448 old = features; 449 features = g_strdup_printf("%s%s%.46s", old, *old ? ", " : "", 450 table->name); 451 g_free(old); 452 mask &= ~(1ULL << table->bit); 453 } 454 } 455 table++; 456 } 457 458 if (mask) { 459 old = features; 460 features = g_strdup_printf("%s%sUnknown incompatible feature: %" PRIx64, 461 old, *old ? ", " : "", mask); 462 g_free(old); 463 } 464 465 error_setg(errp, "Unsupported qcow2 feature(s): %s", features); 466 g_free(features); 467 } 468 469 /* 470 * Sets the dirty bit and flushes afterwards if necessary. 471 * 472 * The incompatible_features bit is only set if the image file header was 473 * updated successfully. Therefore it is not required to check the return 474 * value of this function. 475 */ 476 int qcow2_mark_dirty(BlockDriverState *bs) 477 { 478 BDRVQcow2State *s = bs->opaque; 479 uint64_t val; 480 int ret; 481 482 assert(s->qcow_version >= 3); 483 484 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 485 return 0; /* already dirty */ 486 } 487 488 val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY); 489 ret = bdrv_pwrite(bs->file, offsetof(QCowHeader, incompatible_features), 490 &val, sizeof(val)); 491 if (ret < 0) { 492 return ret; 493 } 494 ret = bdrv_flush(bs->file->bs); 495 if (ret < 0) { 496 return ret; 497 } 498 499 /* Only treat image as dirty if the header was updated successfully */ 500 s->incompatible_features |= QCOW2_INCOMPAT_DIRTY; 501 return 0; 502 } 503 504 /* 505 * Clears the dirty bit and flushes before if necessary. Only call this 506 * function when there are no pending requests, it does not guard against 507 * concurrent requests dirtying the image. 508 */ 509 static int qcow2_mark_clean(BlockDriverState *bs) 510 { 511 BDRVQcow2State *s = bs->opaque; 512 513 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 514 int ret; 515 516 s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY; 517 518 ret = qcow2_flush_caches(bs); 519 if (ret < 0) { 520 return ret; 521 } 522 523 return qcow2_update_header(bs); 524 } 525 return 0; 526 } 527 528 /* 529 * Marks the image as corrupt. 530 */ 531 int qcow2_mark_corrupt(BlockDriverState *bs) 532 { 533 BDRVQcow2State *s = bs->opaque; 534 535 s->incompatible_features |= QCOW2_INCOMPAT_CORRUPT; 536 return qcow2_update_header(bs); 537 } 538 539 /* 540 * Marks the image as consistent, i.e., unsets the corrupt bit, and flushes 541 * before if necessary. 542 */ 543 int qcow2_mark_consistent(BlockDriverState *bs) 544 { 545 BDRVQcow2State *s = bs->opaque; 546 547 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) { 548 int ret = qcow2_flush_caches(bs); 549 if (ret < 0) { 550 return ret; 551 } 552 553 s->incompatible_features &= ~QCOW2_INCOMPAT_CORRUPT; 554 return qcow2_update_header(bs); 555 } 556 return 0; 557 } 558 559 static int coroutine_fn qcow2_co_check_locked(BlockDriverState *bs, 560 BdrvCheckResult *result, 561 BdrvCheckMode fix) 562 { 563 int ret = qcow2_check_refcounts(bs, result, fix); 564 if (ret < 0) { 565 return ret; 566 } 567 568 if (fix && result->check_errors == 0 && result->corruptions == 0) { 569 ret = qcow2_mark_clean(bs); 570 if (ret < 0) { 571 return ret; 572 } 573 return qcow2_mark_consistent(bs); 574 } 575 return ret; 576 } 577 578 static int coroutine_fn qcow2_co_check(BlockDriverState *bs, 579 BdrvCheckResult *result, 580 BdrvCheckMode fix) 581 { 582 BDRVQcow2State *s = bs->opaque; 583 int ret; 584 585 qemu_co_mutex_lock(&s->lock); 586 ret = qcow2_co_check_locked(bs, result, fix); 587 qemu_co_mutex_unlock(&s->lock); 588 return ret; 589 } 590 591 int qcow2_validate_table(BlockDriverState *bs, uint64_t offset, 592 uint64_t entries, size_t entry_len, 593 int64_t max_size_bytes, const char *table_name, 594 Error **errp) 595 { 596 BDRVQcow2State *s = bs->opaque; 597 598 if (entries > max_size_bytes / entry_len) { 599 error_setg(errp, "%s too large", table_name); 600 return -EFBIG; 601 } 602 603 /* Use signed INT64_MAX as the maximum even for uint64_t header fields, 604 * because values will be passed to qemu functions taking int64_t. */ 605 if ((INT64_MAX - entries * entry_len < offset) || 606 (offset_into_cluster(s, offset) != 0)) { 607 error_setg(errp, "%s offset invalid", table_name); 608 return -EINVAL; 609 } 610 611 return 0; 612 } 613 614 static QemuOptsList qcow2_runtime_opts = { 615 .name = "qcow2", 616 .head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head), 617 .desc = { 618 { 619 .name = QCOW2_OPT_LAZY_REFCOUNTS, 620 .type = QEMU_OPT_BOOL, 621 .help = "Postpone refcount updates", 622 }, 623 { 624 .name = QCOW2_OPT_DISCARD_REQUEST, 625 .type = QEMU_OPT_BOOL, 626 .help = "Pass guest discard requests to the layer below", 627 }, 628 { 629 .name = QCOW2_OPT_DISCARD_SNAPSHOT, 630 .type = QEMU_OPT_BOOL, 631 .help = "Generate discard requests when snapshot related space " 632 "is freed", 633 }, 634 { 635 .name = QCOW2_OPT_DISCARD_OTHER, 636 .type = QEMU_OPT_BOOL, 637 .help = "Generate discard requests when other clusters are freed", 638 }, 639 { 640 .name = QCOW2_OPT_OVERLAP, 641 .type = QEMU_OPT_STRING, 642 .help = "Selects which overlap checks to perform from a range of " 643 "templates (none, constant, cached, all)", 644 }, 645 { 646 .name = QCOW2_OPT_OVERLAP_TEMPLATE, 647 .type = QEMU_OPT_STRING, 648 .help = "Selects which overlap checks to perform from a range of " 649 "templates (none, constant, cached, all)", 650 }, 651 { 652 .name = QCOW2_OPT_OVERLAP_MAIN_HEADER, 653 .type = QEMU_OPT_BOOL, 654 .help = "Check for unintended writes into the main qcow2 header", 655 }, 656 { 657 .name = QCOW2_OPT_OVERLAP_ACTIVE_L1, 658 .type = QEMU_OPT_BOOL, 659 .help = "Check for unintended writes into the active L1 table", 660 }, 661 { 662 .name = QCOW2_OPT_OVERLAP_ACTIVE_L2, 663 .type = QEMU_OPT_BOOL, 664 .help = "Check for unintended writes into an active L2 table", 665 }, 666 { 667 .name = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE, 668 .type = QEMU_OPT_BOOL, 669 .help = "Check for unintended writes into the refcount table", 670 }, 671 { 672 .name = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK, 673 .type = QEMU_OPT_BOOL, 674 .help = "Check for unintended writes into a refcount block", 675 }, 676 { 677 .name = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE, 678 .type = QEMU_OPT_BOOL, 679 .help = "Check for unintended writes into the snapshot table", 680 }, 681 { 682 .name = QCOW2_OPT_OVERLAP_INACTIVE_L1, 683 .type = QEMU_OPT_BOOL, 684 .help = "Check for unintended writes into an inactive L1 table", 685 }, 686 { 687 .name = QCOW2_OPT_OVERLAP_INACTIVE_L2, 688 .type = QEMU_OPT_BOOL, 689 .help = "Check for unintended writes into an inactive L2 table", 690 }, 691 { 692 .name = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY, 693 .type = QEMU_OPT_BOOL, 694 .help = "Check for unintended writes into the bitmap directory", 695 }, 696 { 697 .name = QCOW2_OPT_CACHE_SIZE, 698 .type = QEMU_OPT_SIZE, 699 .help = "Maximum combined metadata (L2 tables and refcount blocks) " 700 "cache size", 701 }, 702 { 703 .name = QCOW2_OPT_L2_CACHE_SIZE, 704 .type = QEMU_OPT_SIZE, 705 .help = "Maximum L2 table cache size", 706 }, 707 { 708 .name = QCOW2_OPT_L2_CACHE_ENTRY_SIZE, 709 .type = QEMU_OPT_SIZE, 710 .help = "Size of each entry in the L2 cache", 711 }, 712 { 713 .name = QCOW2_OPT_REFCOUNT_CACHE_SIZE, 714 .type = QEMU_OPT_SIZE, 715 .help = "Maximum refcount block cache size", 716 }, 717 { 718 .name = QCOW2_OPT_CACHE_CLEAN_INTERVAL, 719 .type = QEMU_OPT_NUMBER, 720 .help = "Clean unused cache entries after this time (in seconds)", 721 }, 722 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.", 723 "ID of secret providing qcow2 AES key or LUKS passphrase"), 724 { /* end of list */ } 725 }, 726 }; 727 728 static const char *overlap_bool_option_names[QCOW2_OL_MAX_BITNR] = { 729 [QCOW2_OL_MAIN_HEADER_BITNR] = QCOW2_OPT_OVERLAP_MAIN_HEADER, 730 [QCOW2_OL_ACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L1, 731 [QCOW2_OL_ACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L2, 732 [QCOW2_OL_REFCOUNT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE, 733 [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK, 734 [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE, 735 [QCOW2_OL_INACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L1, 736 [QCOW2_OL_INACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L2, 737 [QCOW2_OL_BITMAP_DIRECTORY_BITNR] = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY, 738 }; 739 740 static void cache_clean_timer_cb(void *opaque) 741 { 742 BlockDriverState *bs = opaque; 743 BDRVQcow2State *s = bs->opaque; 744 qcow2_cache_clean_unused(s->l2_table_cache); 745 qcow2_cache_clean_unused(s->refcount_block_cache); 746 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 747 (int64_t) s->cache_clean_interval * 1000); 748 } 749 750 static void cache_clean_timer_init(BlockDriverState *bs, AioContext *context) 751 { 752 BDRVQcow2State *s = bs->opaque; 753 if (s->cache_clean_interval > 0) { 754 s->cache_clean_timer = aio_timer_new(context, QEMU_CLOCK_VIRTUAL, 755 SCALE_MS, cache_clean_timer_cb, 756 bs); 757 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 758 (int64_t) s->cache_clean_interval * 1000); 759 } 760 } 761 762 static void cache_clean_timer_del(BlockDriverState *bs) 763 { 764 BDRVQcow2State *s = bs->opaque; 765 if (s->cache_clean_timer) { 766 timer_del(s->cache_clean_timer); 767 timer_free(s->cache_clean_timer); 768 s->cache_clean_timer = NULL; 769 } 770 } 771 772 static void qcow2_detach_aio_context(BlockDriverState *bs) 773 { 774 cache_clean_timer_del(bs); 775 } 776 777 static void qcow2_attach_aio_context(BlockDriverState *bs, 778 AioContext *new_context) 779 { 780 cache_clean_timer_init(bs, new_context); 781 } 782 783 static void read_cache_sizes(BlockDriverState *bs, QemuOpts *opts, 784 uint64_t *l2_cache_size, 785 uint64_t *l2_cache_entry_size, 786 uint64_t *refcount_cache_size, Error **errp) 787 { 788 BDRVQcow2State *s = bs->opaque; 789 uint64_t combined_cache_size, l2_cache_max_setting; 790 bool l2_cache_size_set, refcount_cache_size_set, combined_cache_size_set; 791 int min_refcount_cache = MIN_REFCOUNT_CACHE_SIZE * s->cluster_size; 792 uint64_t virtual_disk_size = bs->total_sectors * BDRV_SECTOR_SIZE; 793 uint64_t max_l2_cache = virtual_disk_size / (s->cluster_size / 8); 794 795 combined_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_CACHE_SIZE); 796 l2_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_SIZE); 797 refcount_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 798 799 combined_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_CACHE_SIZE, 0); 800 l2_cache_max_setting = qemu_opt_get_size(opts, QCOW2_OPT_L2_CACHE_SIZE, 801 DEFAULT_L2_CACHE_MAX_SIZE); 802 *refcount_cache_size = qemu_opt_get_size(opts, 803 QCOW2_OPT_REFCOUNT_CACHE_SIZE, 0); 804 805 *l2_cache_entry_size = qemu_opt_get_size( 806 opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE, s->cluster_size); 807 808 *l2_cache_size = MIN(max_l2_cache, l2_cache_max_setting); 809 810 if (combined_cache_size_set) { 811 if (l2_cache_size_set && refcount_cache_size_set) { 812 error_setg(errp, QCOW2_OPT_CACHE_SIZE ", " QCOW2_OPT_L2_CACHE_SIZE 813 " and " QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not be set " 814 "at the same time"); 815 return; 816 } else if (l2_cache_size_set && 817 (l2_cache_max_setting > combined_cache_size)) { 818 error_setg(errp, QCOW2_OPT_L2_CACHE_SIZE " may not exceed " 819 QCOW2_OPT_CACHE_SIZE); 820 return; 821 } else if (*refcount_cache_size > combined_cache_size) { 822 error_setg(errp, QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not exceed " 823 QCOW2_OPT_CACHE_SIZE); 824 return; 825 } 826 827 if (l2_cache_size_set) { 828 *refcount_cache_size = combined_cache_size - *l2_cache_size; 829 } else if (refcount_cache_size_set) { 830 *l2_cache_size = combined_cache_size - *refcount_cache_size; 831 } else { 832 /* Assign as much memory as possible to the L2 cache, and 833 * use the remainder for the refcount cache */ 834 if (combined_cache_size >= max_l2_cache + min_refcount_cache) { 835 *l2_cache_size = max_l2_cache; 836 *refcount_cache_size = combined_cache_size - *l2_cache_size; 837 } else { 838 *refcount_cache_size = 839 MIN(combined_cache_size, min_refcount_cache); 840 *l2_cache_size = combined_cache_size - *refcount_cache_size; 841 } 842 } 843 } 844 /* l2_cache_size and refcount_cache_size are ensured to have at least 845 * their minimum values in qcow2_update_options_prepare() */ 846 847 if (*l2_cache_entry_size < (1 << MIN_CLUSTER_BITS) || 848 *l2_cache_entry_size > s->cluster_size || 849 !is_power_of_2(*l2_cache_entry_size)) { 850 error_setg(errp, "L2 cache entry size must be a power of two " 851 "between %d and the cluster size (%d)", 852 1 << MIN_CLUSTER_BITS, s->cluster_size); 853 return; 854 } 855 } 856 857 typedef struct Qcow2ReopenState { 858 Qcow2Cache *l2_table_cache; 859 Qcow2Cache *refcount_block_cache; 860 int l2_slice_size; /* Number of entries in a slice of the L2 table */ 861 bool use_lazy_refcounts; 862 int overlap_check; 863 bool discard_passthrough[QCOW2_DISCARD_MAX]; 864 uint64_t cache_clean_interval; 865 QCryptoBlockOpenOptions *crypto_opts; /* Disk encryption runtime options */ 866 } Qcow2ReopenState; 867 868 static int qcow2_update_options_prepare(BlockDriverState *bs, 869 Qcow2ReopenState *r, 870 QDict *options, int flags, 871 Error **errp) 872 { 873 BDRVQcow2State *s = bs->opaque; 874 QemuOpts *opts = NULL; 875 const char *opt_overlap_check, *opt_overlap_check_template; 876 int overlap_check_template = 0; 877 uint64_t l2_cache_size, l2_cache_entry_size, refcount_cache_size; 878 int i; 879 const char *encryptfmt; 880 QDict *encryptopts = NULL; 881 Error *local_err = NULL; 882 int ret; 883 884 qdict_extract_subqdict(options, &encryptopts, "encrypt."); 885 encryptfmt = qdict_get_try_str(encryptopts, "format"); 886 887 opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort); 888 qemu_opts_absorb_qdict(opts, options, &local_err); 889 if (local_err) { 890 error_propagate(errp, local_err); 891 ret = -EINVAL; 892 goto fail; 893 } 894 895 /* get L2 table/refcount block cache size from command line options */ 896 read_cache_sizes(bs, opts, &l2_cache_size, &l2_cache_entry_size, 897 &refcount_cache_size, &local_err); 898 if (local_err) { 899 error_propagate(errp, local_err); 900 ret = -EINVAL; 901 goto fail; 902 } 903 904 l2_cache_size /= l2_cache_entry_size; 905 if (l2_cache_size < MIN_L2_CACHE_SIZE) { 906 l2_cache_size = MIN_L2_CACHE_SIZE; 907 } 908 if (l2_cache_size > INT_MAX) { 909 error_setg(errp, "L2 cache size too big"); 910 ret = -EINVAL; 911 goto fail; 912 } 913 914 refcount_cache_size /= s->cluster_size; 915 if (refcount_cache_size < MIN_REFCOUNT_CACHE_SIZE) { 916 refcount_cache_size = MIN_REFCOUNT_CACHE_SIZE; 917 } 918 if (refcount_cache_size > INT_MAX) { 919 error_setg(errp, "Refcount cache size too big"); 920 ret = -EINVAL; 921 goto fail; 922 } 923 924 /* alloc new L2 table/refcount block cache, flush old one */ 925 if (s->l2_table_cache) { 926 ret = qcow2_cache_flush(bs, s->l2_table_cache); 927 if (ret) { 928 error_setg_errno(errp, -ret, "Failed to flush the L2 table cache"); 929 goto fail; 930 } 931 } 932 933 if (s->refcount_block_cache) { 934 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 935 if (ret) { 936 error_setg_errno(errp, -ret, 937 "Failed to flush the refcount block cache"); 938 goto fail; 939 } 940 } 941 942 r->l2_slice_size = l2_cache_entry_size / sizeof(uint64_t); 943 r->l2_table_cache = qcow2_cache_create(bs, l2_cache_size, 944 l2_cache_entry_size); 945 r->refcount_block_cache = qcow2_cache_create(bs, refcount_cache_size, 946 s->cluster_size); 947 if (r->l2_table_cache == NULL || r->refcount_block_cache == NULL) { 948 error_setg(errp, "Could not allocate metadata caches"); 949 ret = -ENOMEM; 950 goto fail; 951 } 952 953 /* New interval for cache cleanup timer */ 954 r->cache_clean_interval = 955 qemu_opt_get_number(opts, QCOW2_OPT_CACHE_CLEAN_INTERVAL, 956 DEFAULT_CACHE_CLEAN_INTERVAL); 957 #ifndef CONFIG_LINUX 958 if (r->cache_clean_interval != 0) { 959 error_setg(errp, QCOW2_OPT_CACHE_CLEAN_INTERVAL 960 " not supported on this host"); 961 ret = -EINVAL; 962 goto fail; 963 } 964 #endif 965 if (r->cache_clean_interval > UINT_MAX) { 966 error_setg(errp, "Cache clean interval too big"); 967 ret = -EINVAL; 968 goto fail; 969 } 970 971 /* lazy-refcounts; flush if going from enabled to disabled */ 972 r->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS, 973 (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS)); 974 if (r->use_lazy_refcounts && s->qcow_version < 3) { 975 error_setg(errp, "Lazy refcounts require a qcow2 image with at least " 976 "qemu 1.1 compatibility level"); 977 ret = -EINVAL; 978 goto fail; 979 } 980 981 if (s->use_lazy_refcounts && !r->use_lazy_refcounts) { 982 ret = qcow2_mark_clean(bs); 983 if (ret < 0) { 984 error_setg_errno(errp, -ret, "Failed to disable lazy refcounts"); 985 goto fail; 986 } 987 } 988 989 /* Overlap check options */ 990 opt_overlap_check = qemu_opt_get(opts, QCOW2_OPT_OVERLAP); 991 opt_overlap_check_template = qemu_opt_get(opts, QCOW2_OPT_OVERLAP_TEMPLATE); 992 if (opt_overlap_check_template && opt_overlap_check && 993 strcmp(opt_overlap_check_template, opt_overlap_check)) 994 { 995 error_setg(errp, "Conflicting values for qcow2 options '" 996 QCOW2_OPT_OVERLAP "' ('%s') and '" QCOW2_OPT_OVERLAP_TEMPLATE 997 "' ('%s')", opt_overlap_check, opt_overlap_check_template); 998 ret = -EINVAL; 999 goto fail; 1000 } 1001 if (!opt_overlap_check) { 1002 opt_overlap_check = opt_overlap_check_template ?: "cached"; 1003 } 1004 1005 if (!strcmp(opt_overlap_check, "none")) { 1006 overlap_check_template = 0; 1007 } else if (!strcmp(opt_overlap_check, "constant")) { 1008 overlap_check_template = QCOW2_OL_CONSTANT; 1009 } else if (!strcmp(opt_overlap_check, "cached")) { 1010 overlap_check_template = QCOW2_OL_CACHED; 1011 } else if (!strcmp(opt_overlap_check, "all")) { 1012 overlap_check_template = QCOW2_OL_ALL; 1013 } else { 1014 error_setg(errp, "Unsupported value '%s' for qcow2 option " 1015 "'overlap-check'. Allowed are any of the following: " 1016 "none, constant, cached, all", opt_overlap_check); 1017 ret = -EINVAL; 1018 goto fail; 1019 } 1020 1021 r->overlap_check = 0; 1022 for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) { 1023 /* overlap-check defines a template bitmask, but every flag may be 1024 * overwritten through the associated boolean option */ 1025 r->overlap_check |= 1026 qemu_opt_get_bool(opts, overlap_bool_option_names[i], 1027 overlap_check_template & (1 << i)) << i; 1028 } 1029 1030 r->discard_passthrough[QCOW2_DISCARD_NEVER] = false; 1031 r->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true; 1032 r->discard_passthrough[QCOW2_DISCARD_REQUEST] = 1033 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST, 1034 flags & BDRV_O_UNMAP); 1035 r->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] = 1036 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true); 1037 r->discard_passthrough[QCOW2_DISCARD_OTHER] = 1038 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false); 1039 1040 switch (s->crypt_method_header) { 1041 case QCOW_CRYPT_NONE: 1042 if (encryptfmt) { 1043 error_setg(errp, "No encryption in image header, but options " 1044 "specified format '%s'", encryptfmt); 1045 ret = -EINVAL; 1046 goto fail; 1047 } 1048 break; 1049 1050 case QCOW_CRYPT_AES: 1051 if (encryptfmt && !g_str_equal(encryptfmt, "aes")) { 1052 error_setg(errp, 1053 "Header reported 'aes' encryption format but " 1054 "options specify '%s'", encryptfmt); 1055 ret = -EINVAL; 1056 goto fail; 1057 } 1058 qdict_put_str(encryptopts, "format", "qcow"); 1059 r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp); 1060 break; 1061 1062 case QCOW_CRYPT_LUKS: 1063 if (encryptfmt && !g_str_equal(encryptfmt, "luks")) { 1064 error_setg(errp, 1065 "Header reported 'luks' encryption format but " 1066 "options specify '%s'", encryptfmt); 1067 ret = -EINVAL; 1068 goto fail; 1069 } 1070 qdict_put_str(encryptopts, "format", "luks"); 1071 r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp); 1072 break; 1073 1074 default: 1075 error_setg(errp, "Unsupported encryption method %d", 1076 s->crypt_method_header); 1077 break; 1078 } 1079 if (s->crypt_method_header != QCOW_CRYPT_NONE && !r->crypto_opts) { 1080 ret = -EINVAL; 1081 goto fail; 1082 } 1083 1084 ret = 0; 1085 fail: 1086 qobject_unref(encryptopts); 1087 qemu_opts_del(opts); 1088 opts = NULL; 1089 return ret; 1090 } 1091 1092 static void qcow2_update_options_commit(BlockDriverState *bs, 1093 Qcow2ReopenState *r) 1094 { 1095 BDRVQcow2State *s = bs->opaque; 1096 int i; 1097 1098 if (s->l2_table_cache) { 1099 qcow2_cache_destroy(s->l2_table_cache); 1100 } 1101 if (s->refcount_block_cache) { 1102 qcow2_cache_destroy(s->refcount_block_cache); 1103 } 1104 s->l2_table_cache = r->l2_table_cache; 1105 s->refcount_block_cache = r->refcount_block_cache; 1106 s->l2_slice_size = r->l2_slice_size; 1107 1108 s->overlap_check = r->overlap_check; 1109 s->use_lazy_refcounts = r->use_lazy_refcounts; 1110 1111 for (i = 0; i < QCOW2_DISCARD_MAX; i++) { 1112 s->discard_passthrough[i] = r->discard_passthrough[i]; 1113 } 1114 1115 if (s->cache_clean_interval != r->cache_clean_interval) { 1116 cache_clean_timer_del(bs); 1117 s->cache_clean_interval = r->cache_clean_interval; 1118 cache_clean_timer_init(bs, bdrv_get_aio_context(bs)); 1119 } 1120 1121 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 1122 s->crypto_opts = r->crypto_opts; 1123 } 1124 1125 static void qcow2_update_options_abort(BlockDriverState *bs, 1126 Qcow2ReopenState *r) 1127 { 1128 if (r->l2_table_cache) { 1129 qcow2_cache_destroy(r->l2_table_cache); 1130 } 1131 if (r->refcount_block_cache) { 1132 qcow2_cache_destroy(r->refcount_block_cache); 1133 } 1134 qapi_free_QCryptoBlockOpenOptions(r->crypto_opts); 1135 } 1136 1137 static int qcow2_update_options(BlockDriverState *bs, QDict *options, 1138 int flags, Error **errp) 1139 { 1140 Qcow2ReopenState r = {}; 1141 int ret; 1142 1143 ret = qcow2_update_options_prepare(bs, &r, options, flags, errp); 1144 if (ret >= 0) { 1145 qcow2_update_options_commit(bs, &r); 1146 } else { 1147 qcow2_update_options_abort(bs, &r); 1148 } 1149 1150 return ret; 1151 } 1152 1153 /* Called with s->lock held. */ 1154 static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options, 1155 int flags, Error **errp) 1156 { 1157 BDRVQcow2State *s = bs->opaque; 1158 unsigned int len, i; 1159 int ret = 0; 1160 QCowHeader header; 1161 Error *local_err = NULL; 1162 uint64_t ext_end; 1163 uint64_t l1_vm_state_index; 1164 bool update_header = false; 1165 1166 ret = bdrv_pread(bs->file, 0, &header, sizeof(header)); 1167 if (ret < 0) { 1168 error_setg_errno(errp, -ret, "Could not read qcow2 header"); 1169 goto fail; 1170 } 1171 header.magic = be32_to_cpu(header.magic); 1172 header.version = be32_to_cpu(header.version); 1173 header.backing_file_offset = be64_to_cpu(header.backing_file_offset); 1174 header.backing_file_size = be32_to_cpu(header.backing_file_size); 1175 header.size = be64_to_cpu(header.size); 1176 header.cluster_bits = be32_to_cpu(header.cluster_bits); 1177 header.crypt_method = be32_to_cpu(header.crypt_method); 1178 header.l1_table_offset = be64_to_cpu(header.l1_table_offset); 1179 header.l1_size = be32_to_cpu(header.l1_size); 1180 header.refcount_table_offset = be64_to_cpu(header.refcount_table_offset); 1181 header.refcount_table_clusters = 1182 be32_to_cpu(header.refcount_table_clusters); 1183 header.snapshots_offset = be64_to_cpu(header.snapshots_offset); 1184 header.nb_snapshots = be32_to_cpu(header.nb_snapshots); 1185 1186 if (header.magic != QCOW_MAGIC) { 1187 error_setg(errp, "Image is not in qcow2 format"); 1188 ret = -EINVAL; 1189 goto fail; 1190 } 1191 if (header.version < 2 || header.version > 3) { 1192 error_setg(errp, "Unsupported qcow2 version %" PRIu32, header.version); 1193 ret = -ENOTSUP; 1194 goto fail; 1195 } 1196 1197 s->qcow_version = header.version; 1198 1199 /* Initialise cluster size */ 1200 if (header.cluster_bits < MIN_CLUSTER_BITS || 1201 header.cluster_bits > MAX_CLUSTER_BITS) { 1202 error_setg(errp, "Unsupported cluster size: 2^%" PRIu32, 1203 header.cluster_bits); 1204 ret = -EINVAL; 1205 goto fail; 1206 } 1207 1208 s->cluster_bits = header.cluster_bits; 1209 s->cluster_size = 1 << s->cluster_bits; 1210 s->cluster_sectors = 1 << (s->cluster_bits - BDRV_SECTOR_BITS); 1211 1212 /* Initialise version 3 header fields */ 1213 if (header.version == 2) { 1214 header.incompatible_features = 0; 1215 header.compatible_features = 0; 1216 header.autoclear_features = 0; 1217 header.refcount_order = 4; 1218 header.header_length = 72; 1219 } else { 1220 header.incompatible_features = 1221 be64_to_cpu(header.incompatible_features); 1222 header.compatible_features = be64_to_cpu(header.compatible_features); 1223 header.autoclear_features = be64_to_cpu(header.autoclear_features); 1224 header.refcount_order = be32_to_cpu(header.refcount_order); 1225 header.header_length = be32_to_cpu(header.header_length); 1226 1227 if (header.header_length < 104) { 1228 error_setg(errp, "qcow2 header too short"); 1229 ret = -EINVAL; 1230 goto fail; 1231 } 1232 } 1233 1234 if (header.header_length > s->cluster_size) { 1235 error_setg(errp, "qcow2 header exceeds cluster size"); 1236 ret = -EINVAL; 1237 goto fail; 1238 } 1239 1240 if (header.header_length > sizeof(header)) { 1241 s->unknown_header_fields_size = header.header_length - sizeof(header); 1242 s->unknown_header_fields = g_malloc(s->unknown_header_fields_size); 1243 ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields, 1244 s->unknown_header_fields_size); 1245 if (ret < 0) { 1246 error_setg_errno(errp, -ret, "Could not read unknown qcow2 header " 1247 "fields"); 1248 goto fail; 1249 } 1250 } 1251 1252 if (header.backing_file_offset > s->cluster_size) { 1253 error_setg(errp, "Invalid backing file offset"); 1254 ret = -EINVAL; 1255 goto fail; 1256 } 1257 1258 if (header.backing_file_offset) { 1259 ext_end = header.backing_file_offset; 1260 } else { 1261 ext_end = 1 << header.cluster_bits; 1262 } 1263 1264 /* Handle feature bits */ 1265 s->incompatible_features = header.incompatible_features; 1266 s->compatible_features = header.compatible_features; 1267 s->autoclear_features = header.autoclear_features; 1268 1269 if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) { 1270 void *feature_table = NULL; 1271 qcow2_read_extensions(bs, header.header_length, ext_end, 1272 &feature_table, flags, NULL, NULL); 1273 report_unsupported_feature(errp, feature_table, 1274 s->incompatible_features & 1275 ~QCOW2_INCOMPAT_MASK); 1276 ret = -ENOTSUP; 1277 g_free(feature_table); 1278 goto fail; 1279 } 1280 1281 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) { 1282 /* Corrupt images may not be written to unless they are being repaired 1283 */ 1284 if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) { 1285 error_setg(errp, "qcow2: Image is corrupt; cannot be opened " 1286 "read/write"); 1287 ret = -EACCES; 1288 goto fail; 1289 } 1290 } 1291 1292 /* Check support for various header values */ 1293 if (header.refcount_order > 6) { 1294 error_setg(errp, "Reference count entry width too large; may not " 1295 "exceed 64 bits"); 1296 ret = -EINVAL; 1297 goto fail; 1298 } 1299 s->refcount_order = header.refcount_order; 1300 s->refcount_bits = 1 << s->refcount_order; 1301 s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1); 1302 s->refcount_max += s->refcount_max - 1; 1303 1304 s->crypt_method_header = header.crypt_method; 1305 if (s->crypt_method_header) { 1306 if (bdrv_uses_whitelist() && 1307 s->crypt_method_header == QCOW_CRYPT_AES) { 1308 error_setg(errp, 1309 "Use of AES-CBC encrypted qcow2 images is no longer " 1310 "supported in system emulators"); 1311 error_append_hint(errp, 1312 "You can use 'qemu-img convert' to convert your " 1313 "image to an alternative supported format, such " 1314 "as unencrypted qcow2, or raw with the LUKS " 1315 "format instead.\n"); 1316 ret = -ENOSYS; 1317 goto fail; 1318 } 1319 1320 if (s->crypt_method_header == QCOW_CRYPT_AES) { 1321 s->crypt_physical_offset = false; 1322 } else { 1323 /* Assuming LUKS and any future crypt methods we 1324 * add will all use physical offsets, due to the 1325 * fact that the alternative is insecure... */ 1326 s->crypt_physical_offset = true; 1327 } 1328 1329 bs->encrypted = true; 1330 } 1331 1332 s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */ 1333 s->l2_size = 1 << s->l2_bits; 1334 /* 2^(s->refcount_order - 3) is the refcount width in bytes */ 1335 s->refcount_block_bits = s->cluster_bits - (s->refcount_order - 3); 1336 s->refcount_block_size = 1 << s->refcount_block_bits; 1337 bs->total_sectors = header.size / BDRV_SECTOR_SIZE; 1338 s->csize_shift = (62 - (s->cluster_bits - 8)); 1339 s->csize_mask = (1 << (s->cluster_bits - 8)) - 1; 1340 s->cluster_offset_mask = (1LL << s->csize_shift) - 1; 1341 1342 s->refcount_table_offset = header.refcount_table_offset; 1343 s->refcount_table_size = 1344 header.refcount_table_clusters << (s->cluster_bits - 3); 1345 1346 if (header.refcount_table_clusters == 0 && !(flags & BDRV_O_CHECK)) { 1347 error_setg(errp, "Image does not contain a reference count table"); 1348 ret = -EINVAL; 1349 goto fail; 1350 } 1351 1352 ret = qcow2_validate_table(bs, s->refcount_table_offset, 1353 header.refcount_table_clusters, 1354 s->cluster_size, QCOW_MAX_REFTABLE_SIZE, 1355 "Reference count table", errp); 1356 if (ret < 0) { 1357 goto fail; 1358 } 1359 1360 /* The total size in bytes of the snapshot table is checked in 1361 * qcow2_read_snapshots() because the size of each snapshot is 1362 * variable and we don't know it yet. 1363 * Here we only check the offset and number of snapshots. */ 1364 ret = qcow2_validate_table(bs, header.snapshots_offset, 1365 header.nb_snapshots, 1366 sizeof(QCowSnapshotHeader), 1367 sizeof(QCowSnapshotHeader) * QCOW_MAX_SNAPSHOTS, 1368 "Snapshot table", errp); 1369 if (ret < 0) { 1370 goto fail; 1371 } 1372 1373 /* read the level 1 table */ 1374 ret = qcow2_validate_table(bs, header.l1_table_offset, 1375 header.l1_size, sizeof(uint64_t), 1376 QCOW_MAX_L1_SIZE, "Active L1 table", errp); 1377 if (ret < 0) { 1378 goto fail; 1379 } 1380 s->l1_size = header.l1_size; 1381 s->l1_table_offset = header.l1_table_offset; 1382 1383 l1_vm_state_index = size_to_l1(s, header.size); 1384 if (l1_vm_state_index > INT_MAX) { 1385 error_setg(errp, "Image is too big"); 1386 ret = -EFBIG; 1387 goto fail; 1388 } 1389 s->l1_vm_state_index = l1_vm_state_index; 1390 1391 /* the L1 table must contain at least enough entries to put 1392 header.size bytes */ 1393 if (s->l1_size < s->l1_vm_state_index) { 1394 error_setg(errp, "L1 table is too small"); 1395 ret = -EINVAL; 1396 goto fail; 1397 } 1398 1399 if (s->l1_size > 0) { 1400 s->l1_table = qemu_try_blockalign(bs->file->bs, 1401 ROUND_UP(s->l1_size * sizeof(uint64_t), 512)); 1402 if (s->l1_table == NULL) { 1403 error_setg(errp, "Could not allocate L1 table"); 1404 ret = -ENOMEM; 1405 goto fail; 1406 } 1407 ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table, 1408 s->l1_size * sizeof(uint64_t)); 1409 if (ret < 0) { 1410 error_setg_errno(errp, -ret, "Could not read L1 table"); 1411 goto fail; 1412 } 1413 for(i = 0;i < s->l1_size; i++) { 1414 s->l1_table[i] = be64_to_cpu(s->l1_table[i]); 1415 } 1416 } 1417 1418 /* Parse driver-specific options */ 1419 ret = qcow2_update_options(bs, options, flags, errp); 1420 if (ret < 0) { 1421 goto fail; 1422 } 1423 1424 s->flags = flags; 1425 1426 ret = qcow2_refcount_init(bs); 1427 if (ret != 0) { 1428 error_setg_errno(errp, -ret, "Could not initialize refcount handling"); 1429 goto fail; 1430 } 1431 1432 QLIST_INIT(&s->cluster_allocs); 1433 QTAILQ_INIT(&s->discards); 1434 1435 /* read qcow2 extensions */ 1436 if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL, 1437 flags, &update_header, &local_err)) { 1438 error_propagate(errp, local_err); 1439 ret = -EINVAL; 1440 goto fail; 1441 } 1442 1443 /* qcow2_read_extension may have set up the crypto context 1444 * if the crypt method needs a header region, some methods 1445 * don't need header extensions, so must check here 1446 */ 1447 if (s->crypt_method_header && !s->crypto) { 1448 if (s->crypt_method_header == QCOW_CRYPT_AES) { 1449 unsigned int cflags = 0; 1450 if (flags & BDRV_O_NO_IO) { 1451 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO; 1452 } 1453 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.", 1454 NULL, NULL, cflags, 1, errp); 1455 if (!s->crypto) { 1456 ret = -EINVAL; 1457 goto fail; 1458 } 1459 } else if (!(flags & BDRV_O_NO_IO)) { 1460 error_setg(errp, "Missing CRYPTO header for crypt method %d", 1461 s->crypt_method_header); 1462 ret = -EINVAL; 1463 goto fail; 1464 } 1465 } 1466 1467 /* read the backing file name */ 1468 if (header.backing_file_offset != 0) { 1469 len = header.backing_file_size; 1470 if (len > MIN(1023, s->cluster_size - header.backing_file_offset) || 1471 len >= sizeof(bs->backing_file)) { 1472 error_setg(errp, "Backing file name too long"); 1473 ret = -EINVAL; 1474 goto fail; 1475 } 1476 ret = bdrv_pread(bs->file, header.backing_file_offset, 1477 bs->auto_backing_file, len); 1478 if (ret < 0) { 1479 error_setg_errno(errp, -ret, "Could not read backing file name"); 1480 goto fail; 1481 } 1482 bs->auto_backing_file[len] = '\0'; 1483 pstrcpy(bs->backing_file, sizeof(bs->backing_file), 1484 bs->auto_backing_file); 1485 s->image_backing_file = g_strdup(bs->auto_backing_file); 1486 } 1487 1488 /* Internal snapshots */ 1489 s->snapshots_offset = header.snapshots_offset; 1490 s->nb_snapshots = header.nb_snapshots; 1491 1492 ret = qcow2_read_snapshots(bs); 1493 if (ret < 0) { 1494 error_setg_errno(errp, -ret, "Could not read snapshots"); 1495 goto fail; 1496 } 1497 1498 /* Clear unknown autoclear feature bits */ 1499 update_header |= s->autoclear_features & ~QCOW2_AUTOCLEAR_MASK; 1500 update_header = 1501 update_header && !bs->read_only && !(flags & BDRV_O_INACTIVE); 1502 if (update_header) { 1503 s->autoclear_features &= QCOW2_AUTOCLEAR_MASK; 1504 } 1505 1506 /* == Handle persistent dirty bitmaps == 1507 * 1508 * We want load dirty bitmaps in three cases: 1509 * 1510 * 1. Normal open of the disk in active mode, not related to invalidation 1511 * after migration. 1512 * 1513 * 2. Invalidation of the target vm after pre-copy phase of migration, if 1514 * bitmaps are _not_ migrating through migration channel, i.e. 1515 * 'dirty-bitmaps' capability is disabled. 1516 * 1517 * 3. Invalidation of source vm after failed or canceled migration. 1518 * This is a very interesting case. There are two possible types of 1519 * bitmaps: 1520 * 1521 * A. Stored on inactivation and removed. They should be loaded from the 1522 * image. 1523 * 1524 * B. Not stored: not-persistent bitmaps and bitmaps, migrated through 1525 * the migration channel (with dirty-bitmaps capability). 1526 * 1527 * On the other hand, there are two possible sub-cases: 1528 * 1529 * 3.1 disk was changed by somebody else while were inactive. In this 1530 * case all in-RAM dirty bitmaps (both persistent and not) are 1531 * definitely invalid. And we don't have any method to determine 1532 * this. 1533 * 1534 * Simple and safe thing is to just drop all the bitmaps of type B on 1535 * inactivation. But in this case we lose bitmaps in valid 4.2 case. 1536 * 1537 * On the other hand, resuming source vm, if disk was already changed 1538 * is a bad thing anyway: not only bitmaps, the whole vm state is 1539 * out of sync with disk. 1540 * 1541 * This means, that user or management tool, who for some reason 1542 * decided to resume source vm, after disk was already changed by 1543 * target vm, should at least drop all dirty bitmaps by hand. 1544 * 1545 * So, we can ignore this case for now, but TODO: "generation" 1546 * extension for qcow2, to determine, that image was changed after 1547 * last inactivation. And if it is changed, we will drop (or at least 1548 * mark as 'invalid' all the bitmaps of type B, both persistent 1549 * and not). 1550 * 1551 * 3.2 disk was _not_ changed while were inactive. Bitmaps may be saved 1552 * to disk ('dirty-bitmaps' capability disabled), or not saved 1553 * ('dirty-bitmaps' capability enabled), but we don't need to care 1554 * of: let's load bitmaps as always: stored bitmaps will be loaded, 1555 * and not stored has flag IN_USE=1 in the image and will be skipped 1556 * on loading. 1557 * 1558 * One remaining possible case when we don't want load bitmaps: 1559 * 1560 * 4. Open disk in inactive mode in target vm (bitmaps are migrating or 1561 * will be loaded on invalidation, no needs try loading them before) 1562 */ 1563 1564 if (!(bdrv_get_flags(bs) & BDRV_O_INACTIVE)) { 1565 /* It's case 1, 2 or 3.2. Or 3.1 which is BUG in management layer. */ 1566 bool header_updated = qcow2_load_dirty_bitmaps(bs, &local_err); 1567 1568 update_header = update_header && !header_updated; 1569 } 1570 if (local_err != NULL) { 1571 error_propagate(errp, local_err); 1572 ret = -EINVAL; 1573 goto fail; 1574 } 1575 1576 if (update_header) { 1577 ret = qcow2_update_header(bs); 1578 if (ret < 0) { 1579 error_setg_errno(errp, -ret, "Could not update qcow2 header"); 1580 goto fail; 1581 } 1582 } 1583 1584 bs->supported_zero_flags = header.version >= 3 ? BDRV_REQ_MAY_UNMAP : 0; 1585 1586 /* Repair image if dirty */ 1587 if (!(flags & (BDRV_O_CHECK | BDRV_O_INACTIVE)) && !bs->read_only && 1588 (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) { 1589 BdrvCheckResult result = {0}; 1590 1591 ret = qcow2_co_check_locked(bs, &result, 1592 BDRV_FIX_ERRORS | BDRV_FIX_LEAKS); 1593 if (ret < 0 || result.check_errors) { 1594 if (ret >= 0) { 1595 ret = -EIO; 1596 } 1597 error_setg_errno(errp, -ret, "Could not repair dirty image"); 1598 goto fail; 1599 } 1600 } 1601 1602 #ifdef DEBUG_ALLOC 1603 { 1604 BdrvCheckResult result = {0}; 1605 qcow2_check_refcounts(bs, &result, 0); 1606 } 1607 #endif 1608 1609 qemu_co_queue_init(&s->compress_wait_queue); 1610 1611 return ret; 1612 1613 fail: 1614 g_free(s->unknown_header_fields); 1615 cleanup_unknown_header_ext(bs); 1616 qcow2_free_snapshots(bs); 1617 qcow2_refcount_close(bs); 1618 qemu_vfree(s->l1_table); 1619 /* else pre-write overlap checks in cache_destroy may crash */ 1620 s->l1_table = NULL; 1621 cache_clean_timer_del(bs); 1622 if (s->l2_table_cache) { 1623 qcow2_cache_destroy(s->l2_table_cache); 1624 } 1625 if (s->refcount_block_cache) { 1626 qcow2_cache_destroy(s->refcount_block_cache); 1627 } 1628 qcrypto_block_free(s->crypto); 1629 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 1630 return ret; 1631 } 1632 1633 typedef struct QCow2OpenCo { 1634 BlockDriverState *bs; 1635 QDict *options; 1636 int flags; 1637 Error **errp; 1638 int ret; 1639 } QCow2OpenCo; 1640 1641 static void coroutine_fn qcow2_open_entry(void *opaque) 1642 { 1643 QCow2OpenCo *qoc = opaque; 1644 BDRVQcow2State *s = qoc->bs->opaque; 1645 1646 qemu_co_mutex_lock(&s->lock); 1647 qoc->ret = qcow2_do_open(qoc->bs, qoc->options, qoc->flags, qoc->errp); 1648 qemu_co_mutex_unlock(&s->lock); 1649 } 1650 1651 static int qcow2_open(BlockDriverState *bs, QDict *options, int flags, 1652 Error **errp) 1653 { 1654 BDRVQcow2State *s = bs->opaque; 1655 QCow2OpenCo qoc = { 1656 .bs = bs, 1657 .options = options, 1658 .flags = flags, 1659 .errp = errp, 1660 .ret = -EINPROGRESS 1661 }; 1662 1663 bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file, 1664 false, errp); 1665 if (!bs->file) { 1666 return -EINVAL; 1667 } 1668 1669 /* Initialise locks */ 1670 qemu_co_mutex_init(&s->lock); 1671 1672 if (qemu_in_coroutine()) { 1673 /* From bdrv_co_create. */ 1674 qcow2_open_entry(&qoc); 1675 } else { 1676 assert(qemu_get_current_aio_context() == qemu_get_aio_context()); 1677 qemu_coroutine_enter(qemu_coroutine_create(qcow2_open_entry, &qoc)); 1678 BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS); 1679 } 1680 return qoc.ret; 1681 } 1682 1683 static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp) 1684 { 1685 BDRVQcow2State *s = bs->opaque; 1686 1687 if (bs->encrypted) { 1688 /* Encryption works on a sector granularity */ 1689 bs->bl.request_alignment = qcrypto_block_get_sector_size(s->crypto); 1690 } 1691 bs->bl.pwrite_zeroes_alignment = s->cluster_size; 1692 bs->bl.pdiscard_alignment = s->cluster_size; 1693 } 1694 1695 static int qcow2_reopen_prepare(BDRVReopenState *state, 1696 BlockReopenQueue *queue, Error **errp) 1697 { 1698 Qcow2ReopenState *r; 1699 int ret; 1700 1701 r = g_new0(Qcow2ReopenState, 1); 1702 state->opaque = r; 1703 1704 ret = qcow2_update_options_prepare(state->bs, r, state->options, 1705 state->flags, errp); 1706 if (ret < 0) { 1707 goto fail; 1708 } 1709 1710 /* We need to write out any unwritten data if we reopen read-only. */ 1711 if ((state->flags & BDRV_O_RDWR) == 0) { 1712 ret = qcow2_reopen_bitmaps_ro(state->bs, errp); 1713 if (ret < 0) { 1714 goto fail; 1715 } 1716 1717 ret = bdrv_flush(state->bs); 1718 if (ret < 0) { 1719 goto fail; 1720 } 1721 1722 ret = qcow2_mark_clean(state->bs); 1723 if (ret < 0) { 1724 goto fail; 1725 } 1726 } 1727 1728 return 0; 1729 1730 fail: 1731 qcow2_update_options_abort(state->bs, r); 1732 g_free(r); 1733 return ret; 1734 } 1735 1736 static void qcow2_reopen_commit(BDRVReopenState *state) 1737 { 1738 qcow2_update_options_commit(state->bs, state->opaque); 1739 g_free(state->opaque); 1740 } 1741 1742 static void qcow2_reopen_abort(BDRVReopenState *state) 1743 { 1744 qcow2_update_options_abort(state->bs, state->opaque); 1745 g_free(state->opaque); 1746 } 1747 1748 static void qcow2_join_options(QDict *options, QDict *old_options) 1749 { 1750 bool has_new_overlap_template = 1751 qdict_haskey(options, QCOW2_OPT_OVERLAP) || 1752 qdict_haskey(options, QCOW2_OPT_OVERLAP_TEMPLATE); 1753 bool has_new_total_cache_size = 1754 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE); 1755 bool has_all_cache_options; 1756 1757 /* New overlap template overrides all old overlap options */ 1758 if (has_new_overlap_template) { 1759 qdict_del(old_options, QCOW2_OPT_OVERLAP); 1760 qdict_del(old_options, QCOW2_OPT_OVERLAP_TEMPLATE); 1761 qdict_del(old_options, QCOW2_OPT_OVERLAP_MAIN_HEADER); 1762 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L1); 1763 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L2); 1764 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_TABLE); 1765 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK); 1766 qdict_del(old_options, QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE); 1767 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L1); 1768 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L2); 1769 } 1770 1771 /* New total cache size overrides all old options */ 1772 if (qdict_haskey(options, QCOW2_OPT_CACHE_SIZE)) { 1773 qdict_del(old_options, QCOW2_OPT_L2_CACHE_SIZE); 1774 qdict_del(old_options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 1775 } 1776 1777 qdict_join(options, old_options, false); 1778 1779 /* 1780 * If after merging all cache size options are set, an old total size is 1781 * overwritten. Do keep all options, however, if all three are new. The 1782 * resulting error message is what we want to happen. 1783 */ 1784 has_all_cache_options = 1785 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE) || 1786 qdict_haskey(options, QCOW2_OPT_L2_CACHE_SIZE) || 1787 qdict_haskey(options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 1788 1789 if (has_all_cache_options && !has_new_total_cache_size) { 1790 qdict_del(options, QCOW2_OPT_CACHE_SIZE); 1791 } 1792 } 1793 1794 static int coroutine_fn qcow2_co_block_status(BlockDriverState *bs, 1795 bool want_zero, 1796 int64_t offset, int64_t count, 1797 int64_t *pnum, int64_t *map, 1798 BlockDriverState **file) 1799 { 1800 BDRVQcow2State *s = bs->opaque; 1801 uint64_t cluster_offset; 1802 int index_in_cluster, ret; 1803 unsigned int bytes; 1804 int status = 0; 1805 1806 bytes = MIN(INT_MAX, count); 1807 qemu_co_mutex_lock(&s->lock); 1808 ret = qcow2_get_cluster_offset(bs, offset, &bytes, &cluster_offset); 1809 qemu_co_mutex_unlock(&s->lock); 1810 if (ret < 0) { 1811 return ret; 1812 } 1813 1814 *pnum = bytes; 1815 1816 if (cluster_offset != 0 && ret != QCOW2_CLUSTER_COMPRESSED && 1817 !s->crypto) { 1818 index_in_cluster = offset & (s->cluster_size - 1); 1819 *map = cluster_offset | index_in_cluster; 1820 *file = bs->file->bs; 1821 status |= BDRV_BLOCK_OFFSET_VALID; 1822 } 1823 if (ret == QCOW2_CLUSTER_ZERO_PLAIN || ret == QCOW2_CLUSTER_ZERO_ALLOC) { 1824 status |= BDRV_BLOCK_ZERO; 1825 } else if (ret != QCOW2_CLUSTER_UNALLOCATED) { 1826 status |= BDRV_BLOCK_DATA; 1827 } 1828 return status; 1829 } 1830 1831 static coroutine_fn int qcow2_handle_l2meta(BlockDriverState *bs, 1832 QCowL2Meta **pl2meta, 1833 bool link_l2) 1834 { 1835 int ret = 0; 1836 QCowL2Meta *l2meta = *pl2meta; 1837 1838 while (l2meta != NULL) { 1839 QCowL2Meta *next; 1840 1841 if (link_l2) { 1842 ret = qcow2_alloc_cluster_link_l2(bs, l2meta); 1843 if (ret) { 1844 goto out; 1845 } 1846 } else { 1847 qcow2_alloc_cluster_abort(bs, l2meta); 1848 } 1849 1850 /* Take the request off the list of running requests */ 1851 if (l2meta->nb_clusters != 0) { 1852 QLIST_REMOVE(l2meta, next_in_flight); 1853 } 1854 1855 qemu_co_queue_restart_all(&l2meta->dependent_requests); 1856 1857 next = l2meta->next; 1858 g_free(l2meta); 1859 l2meta = next; 1860 } 1861 out: 1862 *pl2meta = l2meta; 1863 return ret; 1864 } 1865 1866 static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset, 1867 uint64_t bytes, QEMUIOVector *qiov, 1868 int flags) 1869 { 1870 BDRVQcow2State *s = bs->opaque; 1871 int offset_in_cluster; 1872 int ret; 1873 unsigned int cur_bytes; /* number of bytes in current iteration */ 1874 uint64_t cluster_offset = 0; 1875 uint64_t bytes_done = 0; 1876 QEMUIOVector hd_qiov; 1877 uint8_t *cluster_data = NULL; 1878 1879 qemu_iovec_init(&hd_qiov, qiov->niov); 1880 1881 qemu_co_mutex_lock(&s->lock); 1882 1883 while (bytes != 0) { 1884 1885 /* prepare next request */ 1886 cur_bytes = MIN(bytes, INT_MAX); 1887 if (s->crypto) { 1888 cur_bytes = MIN(cur_bytes, 1889 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 1890 } 1891 1892 ret = qcow2_get_cluster_offset(bs, offset, &cur_bytes, &cluster_offset); 1893 if (ret < 0) { 1894 goto fail; 1895 } 1896 1897 offset_in_cluster = offset_into_cluster(s, offset); 1898 1899 qemu_iovec_reset(&hd_qiov); 1900 qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes); 1901 1902 switch (ret) { 1903 case QCOW2_CLUSTER_UNALLOCATED: 1904 1905 if (bs->backing) { 1906 BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); 1907 qemu_co_mutex_unlock(&s->lock); 1908 ret = bdrv_co_preadv(bs->backing, offset, cur_bytes, 1909 &hd_qiov, 0); 1910 qemu_co_mutex_lock(&s->lock); 1911 if (ret < 0) { 1912 goto fail; 1913 } 1914 } else { 1915 /* Note: in this case, no need to wait */ 1916 qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes); 1917 } 1918 break; 1919 1920 case QCOW2_CLUSTER_ZERO_PLAIN: 1921 case QCOW2_CLUSTER_ZERO_ALLOC: 1922 qemu_iovec_memset(&hd_qiov, 0, 0, cur_bytes); 1923 break; 1924 1925 case QCOW2_CLUSTER_COMPRESSED: 1926 qemu_co_mutex_unlock(&s->lock); 1927 ret = qcow2_co_preadv_compressed(bs, cluster_offset, 1928 offset, cur_bytes, 1929 &hd_qiov); 1930 qemu_co_mutex_lock(&s->lock); 1931 if (ret < 0) { 1932 goto fail; 1933 } 1934 1935 break; 1936 1937 case QCOW2_CLUSTER_NORMAL: 1938 if ((cluster_offset & 511) != 0) { 1939 ret = -EIO; 1940 goto fail; 1941 } 1942 1943 if (bs->encrypted) { 1944 assert(s->crypto); 1945 1946 /* 1947 * For encrypted images, read everything into a temporary 1948 * contiguous buffer on which the AES functions can work. 1949 */ 1950 if (!cluster_data) { 1951 cluster_data = 1952 qemu_try_blockalign(bs->file->bs, 1953 QCOW_MAX_CRYPT_CLUSTERS 1954 * s->cluster_size); 1955 if (cluster_data == NULL) { 1956 ret = -ENOMEM; 1957 goto fail; 1958 } 1959 } 1960 1961 assert(cur_bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 1962 qemu_iovec_reset(&hd_qiov); 1963 qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes); 1964 } 1965 1966 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); 1967 qemu_co_mutex_unlock(&s->lock); 1968 ret = bdrv_co_preadv(bs->file, 1969 cluster_offset + offset_in_cluster, 1970 cur_bytes, &hd_qiov, 0); 1971 qemu_co_mutex_lock(&s->lock); 1972 if (ret < 0) { 1973 goto fail; 1974 } 1975 if (bs->encrypted) { 1976 assert(s->crypto); 1977 assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0); 1978 assert((cur_bytes & (BDRV_SECTOR_SIZE - 1)) == 0); 1979 if (qcrypto_block_decrypt(s->crypto, 1980 (s->crypt_physical_offset ? 1981 cluster_offset + offset_in_cluster : 1982 offset), 1983 cluster_data, 1984 cur_bytes, 1985 NULL) < 0) { 1986 ret = -EIO; 1987 goto fail; 1988 } 1989 qemu_iovec_from_buf(qiov, bytes_done, cluster_data, cur_bytes); 1990 } 1991 break; 1992 1993 default: 1994 g_assert_not_reached(); 1995 ret = -EIO; 1996 goto fail; 1997 } 1998 1999 bytes -= cur_bytes; 2000 offset += cur_bytes; 2001 bytes_done += cur_bytes; 2002 } 2003 ret = 0; 2004 2005 fail: 2006 qemu_co_mutex_unlock(&s->lock); 2007 2008 qemu_iovec_destroy(&hd_qiov); 2009 qemu_vfree(cluster_data); 2010 2011 return ret; 2012 } 2013 2014 /* Check if it's possible to merge a write request with the writing of 2015 * the data from the COW regions */ 2016 static bool merge_cow(uint64_t offset, unsigned bytes, 2017 QEMUIOVector *hd_qiov, QCowL2Meta *l2meta) 2018 { 2019 QCowL2Meta *m; 2020 2021 for (m = l2meta; m != NULL; m = m->next) { 2022 /* If both COW regions are empty then there's nothing to merge */ 2023 if (m->cow_start.nb_bytes == 0 && m->cow_end.nb_bytes == 0) { 2024 continue; 2025 } 2026 2027 /* The data (middle) region must be immediately after the 2028 * start region */ 2029 if (l2meta_cow_start(m) + m->cow_start.nb_bytes != offset) { 2030 continue; 2031 } 2032 2033 /* The end region must be immediately after the data (middle) 2034 * region */ 2035 if (m->offset + m->cow_end.offset != offset + bytes) { 2036 continue; 2037 } 2038 2039 /* Make sure that adding both COW regions to the QEMUIOVector 2040 * does not exceed IOV_MAX */ 2041 if (hd_qiov->niov > IOV_MAX - 2) { 2042 continue; 2043 } 2044 2045 m->data_qiov = hd_qiov; 2046 return true; 2047 } 2048 2049 return false; 2050 } 2051 2052 static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset, 2053 uint64_t bytes, QEMUIOVector *qiov, 2054 int flags) 2055 { 2056 BDRVQcow2State *s = bs->opaque; 2057 int offset_in_cluster; 2058 int ret; 2059 unsigned int cur_bytes; /* number of sectors in current iteration */ 2060 uint64_t cluster_offset; 2061 QEMUIOVector hd_qiov; 2062 uint64_t bytes_done = 0; 2063 uint8_t *cluster_data = NULL; 2064 QCowL2Meta *l2meta = NULL; 2065 2066 trace_qcow2_writev_start_req(qemu_coroutine_self(), offset, bytes); 2067 2068 qemu_iovec_init(&hd_qiov, qiov->niov); 2069 2070 qemu_co_mutex_lock(&s->lock); 2071 2072 while (bytes != 0) { 2073 2074 l2meta = NULL; 2075 2076 trace_qcow2_writev_start_part(qemu_coroutine_self()); 2077 offset_in_cluster = offset_into_cluster(s, offset); 2078 cur_bytes = MIN(bytes, INT_MAX); 2079 if (bs->encrypted) { 2080 cur_bytes = MIN(cur_bytes, 2081 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size 2082 - offset_in_cluster); 2083 } 2084 2085 ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes, 2086 &cluster_offset, &l2meta); 2087 if (ret < 0) { 2088 goto fail; 2089 } 2090 2091 assert((cluster_offset & 511) == 0); 2092 2093 qemu_iovec_reset(&hd_qiov); 2094 qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes); 2095 2096 if (bs->encrypted) { 2097 assert(s->crypto); 2098 if (!cluster_data) { 2099 cluster_data = qemu_try_blockalign(bs->file->bs, 2100 QCOW_MAX_CRYPT_CLUSTERS 2101 * s->cluster_size); 2102 if (cluster_data == NULL) { 2103 ret = -ENOMEM; 2104 goto fail; 2105 } 2106 } 2107 2108 assert(hd_qiov.size <= 2109 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 2110 qemu_iovec_to_buf(&hd_qiov, 0, cluster_data, hd_qiov.size); 2111 2112 if (qcrypto_block_encrypt(s->crypto, 2113 (s->crypt_physical_offset ? 2114 cluster_offset + offset_in_cluster : 2115 offset), 2116 cluster_data, 2117 cur_bytes, NULL) < 0) { 2118 ret = -EIO; 2119 goto fail; 2120 } 2121 2122 qemu_iovec_reset(&hd_qiov); 2123 qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes); 2124 } 2125 2126 ret = qcow2_pre_write_overlap_check(bs, 0, 2127 cluster_offset + offset_in_cluster, cur_bytes); 2128 if (ret < 0) { 2129 goto fail; 2130 } 2131 2132 /* If we need to do COW, check if it's possible to merge the 2133 * writing of the guest data together with that of the COW regions. 2134 * If it's not possible (or not necessary) then write the 2135 * guest data now. */ 2136 if (!merge_cow(offset, cur_bytes, &hd_qiov, l2meta)) { 2137 qemu_co_mutex_unlock(&s->lock); 2138 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); 2139 trace_qcow2_writev_data(qemu_coroutine_self(), 2140 cluster_offset + offset_in_cluster); 2141 ret = bdrv_co_pwritev(bs->file, 2142 cluster_offset + offset_in_cluster, 2143 cur_bytes, &hd_qiov, 0); 2144 qemu_co_mutex_lock(&s->lock); 2145 if (ret < 0) { 2146 goto fail; 2147 } 2148 } 2149 2150 ret = qcow2_handle_l2meta(bs, &l2meta, true); 2151 if (ret) { 2152 goto fail; 2153 } 2154 2155 bytes -= cur_bytes; 2156 offset += cur_bytes; 2157 bytes_done += cur_bytes; 2158 trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes); 2159 } 2160 ret = 0; 2161 2162 fail: 2163 qcow2_handle_l2meta(bs, &l2meta, false); 2164 2165 qemu_co_mutex_unlock(&s->lock); 2166 2167 qemu_iovec_destroy(&hd_qiov); 2168 qemu_vfree(cluster_data); 2169 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); 2170 2171 return ret; 2172 } 2173 2174 static int qcow2_inactivate(BlockDriverState *bs) 2175 { 2176 BDRVQcow2State *s = bs->opaque; 2177 int ret, result = 0; 2178 Error *local_err = NULL; 2179 2180 qcow2_store_persistent_dirty_bitmaps(bs, &local_err); 2181 if (local_err != NULL) { 2182 result = -EINVAL; 2183 error_reportf_err(local_err, "Lost persistent bitmaps during " 2184 "inactivation of node '%s': ", 2185 bdrv_get_device_or_node_name(bs)); 2186 } 2187 2188 ret = qcow2_cache_flush(bs, s->l2_table_cache); 2189 if (ret) { 2190 result = ret; 2191 error_report("Failed to flush the L2 table cache: %s", 2192 strerror(-ret)); 2193 } 2194 2195 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 2196 if (ret) { 2197 result = ret; 2198 error_report("Failed to flush the refcount block cache: %s", 2199 strerror(-ret)); 2200 } 2201 2202 if (result == 0) { 2203 qcow2_mark_clean(bs); 2204 } 2205 2206 return result; 2207 } 2208 2209 static void qcow2_close(BlockDriverState *bs) 2210 { 2211 BDRVQcow2State *s = bs->opaque; 2212 qemu_vfree(s->l1_table); 2213 /* else pre-write overlap checks in cache_destroy may crash */ 2214 s->l1_table = NULL; 2215 2216 if (!(s->flags & BDRV_O_INACTIVE)) { 2217 qcow2_inactivate(bs); 2218 } 2219 2220 cache_clean_timer_del(bs); 2221 qcow2_cache_destroy(s->l2_table_cache); 2222 qcow2_cache_destroy(s->refcount_block_cache); 2223 2224 qcrypto_block_free(s->crypto); 2225 s->crypto = NULL; 2226 2227 g_free(s->unknown_header_fields); 2228 cleanup_unknown_header_ext(bs); 2229 2230 g_free(s->image_backing_file); 2231 g_free(s->image_backing_format); 2232 2233 qcow2_refcount_close(bs); 2234 qcow2_free_snapshots(bs); 2235 } 2236 2237 static void coroutine_fn qcow2_co_invalidate_cache(BlockDriverState *bs, 2238 Error **errp) 2239 { 2240 BDRVQcow2State *s = bs->opaque; 2241 int flags = s->flags; 2242 QCryptoBlock *crypto = NULL; 2243 QDict *options; 2244 Error *local_err = NULL; 2245 int ret; 2246 2247 /* 2248 * Backing files are read-only which makes all of their metadata immutable, 2249 * that means we don't have to worry about reopening them here. 2250 */ 2251 2252 crypto = s->crypto; 2253 s->crypto = NULL; 2254 2255 qcow2_close(bs); 2256 2257 memset(s, 0, sizeof(BDRVQcow2State)); 2258 options = qdict_clone_shallow(bs->options); 2259 2260 flags &= ~BDRV_O_INACTIVE; 2261 qemu_co_mutex_lock(&s->lock); 2262 ret = qcow2_do_open(bs, options, flags, &local_err); 2263 qemu_co_mutex_unlock(&s->lock); 2264 qobject_unref(options); 2265 if (local_err) { 2266 error_propagate_prepend(errp, local_err, 2267 "Could not reopen qcow2 layer: "); 2268 bs->drv = NULL; 2269 return; 2270 } else if (ret < 0) { 2271 error_setg_errno(errp, -ret, "Could not reopen qcow2 layer"); 2272 bs->drv = NULL; 2273 return; 2274 } 2275 2276 s->crypto = crypto; 2277 } 2278 2279 static size_t header_ext_add(char *buf, uint32_t magic, const void *s, 2280 size_t len, size_t buflen) 2281 { 2282 QCowExtension *ext_backing_fmt = (QCowExtension*) buf; 2283 size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7); 2284 2285 if (buflen < ext_len) { 2286 return -ENOSPC; 2287 } 2288 2289 *ext_backing_fmt = (QCowExtension) { 2290 .magic = cpu_to_be32(magic), 2291 .len = cpu_to_be32(len), 2292 }; 2293 2294 if (len) { 2295 memcpy(buf + sizeof(QCowExtension), s, len); 2296 } 2297 2298 return ext_len; 2299 } 2300 2301 /* 2302 * Updates the qcow2 header, including the variable length parts of it, i.e. 2303 * the backing file name and all extensions. qcow2 was not designed to allow 2304 * such changes, so if we run out of space (we can only use the first cluster) 2305 * this function may fail. 2306 * 2307 * Returns 0 on success, -errno in error cases. 2308 */ 2309 int qcow2_update_header(BlockDriverState *bs) 2310 { 2311 BDRVQcow2State *s = bs->opaque; 2312 QCowHeader *header; 2313 char *buf; 2314 size_t buflen = s->cluster_size; 2315 int ret; 2316 uint64_t total_size; 2317 uint32_t refcount_table_clusters; 2318 size_t header_length; 2319 Qcow2UnknownHeaderExtension *uext; 2320 2321 buf = qemu_blockalign(bs, buflen); 2322 2323 /* Header structure */ 2324 header = (QCowHeader*) buf; 2325 2326 if (buflen < sizeof(*header)) { 2327 ret = -ENOSPC; 2328 goto fail; 2329 } 2330 2331 header_length = sizeof(*header) + s->unknown_header_fields_size; 2332 total_size = bs->total_sectors * BDRV_SECTOR_SIZE; 2333 refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3); 2334 2335 *header = (QCowHeader) { 2336 /* Version 2 fields */ 2337 .magic = cpu_to_be32(QCOW_MAGIC), 2338 .version = cpu_to_be32(s->qcow_version), 2339 .backing_file_offset = 0, 2340 .backing_file_size = 0, 2341 .cluster_bits = cpu_to_be32(s->cluster_bits), 2342 .size = cpu_to_be64(total_size), 2343 .crypt_method = cpu_to_be32(s->crypt_method_header), 2344 .l1_size = cpu_to_be32(s->l1_size), 2345 .l1_table_offset = cpu_to_be64(s->l1_table_offset), 2346 .refcount_table_offset = cpu_to_be64(s->refcount_table_offset), 2347 .refcount_table_clusters = cpu_to_be32(refcount_table_clusters), 2348 .nb_snapshots = cpu_to_be32(s->nb_snapshots), 2349 .snapshots_offset = cpu_to_be64(s->snapshots_offset), 2350 2351 /* Version 3 fields */ 2352 .incompatible_features = cpu_to_be64(s->incompatible_features), 2353 .compatible_features = cpu_to_be64(s->compatible_features), 2354 .autoclear_features = cpu_to_be64(s->autoclear_features), 2355 .refcount_order = cpu_to_be32(s->refcount_order), 2356 .header_length = cpu_to_be32(header_length), 2357 }; 2358 2359 /* For older versions, write a shorter header */ 2360 switch (s->qcow_version) { 2361 case 2: 2362 ret = offsetof(QCowHeader, incompatible_features); 2363 break; 2364 case 3: 2365 ret = sizeof(*header); 2366 break; 2367 default: 2368 ret = -EINVAL; 2369 goto fail; 2370 } 2371 2372 buf += ret; 2373 buflen -= ret; 2374 memset(buf, 0, buflen); 2375 2376 /* Preserve any unknown field in the header */ 2377 if (s->unknown_header_fields_size) { 2378 if (buflen < s->unknown_header_fields_size) { 2379 ret = -ENOSPC; 2380 goto fail; 2381 } 2382 2383 memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size); 2384 buf += s->unknown_header_fields_size; 2385 buflen -= s->unknown_header_fields_size; 2386 } 2387 2388 /* Backing file format header extension */ 2389 if (s->image_backing_format) { 2390 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT, 2391 s->image_backing_format, 2392 strlen(s->image_backing_format), 2393 buflen); 2394 if (ret < 0) { 2395 goto fail; 2396 } 2397 2398 buf += ret; 2399 buflen -= ret; 2400 } 2401 2402 /* Full disk encryption header pointer extension */ 2403 if (s->crypto_header.offset != 0) { 2404 s->crypto_header.offset = cpu_to_be64(s->crypto_header.offset); 2405 s->crypto_header.length = cpu_to_be64(s->crypto_header.length); 2406 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_CRYPTO_HEADER, 2407 &s->crypto_header, sizeof(s->crypto_header), 2408 buflen); 2409 s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset); 2410 s->crypto_header.length = be64_to_cpu(s->crypto_header.length); 2411 if (ret < 0) { 2412 goto fail; 2413 } 2414 buf += ret; 2415 buflen -= ret; 2416 } 2417 2418 /* Feature table */ 2419 if (s->qcow_version >= 3) { 2420 Qcow2Feature features[] = { 2421 { 2422 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2423 .bit = QCOW2_INCOMPAT_DIRTY_BITNR, 2424 .name = "dirty bit", 2425 }, 2426 { 2427 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2428 .bit = QCOW2_INCOMPAT_CORRUPT_BITNR, 2429 .name = "corrupt bit", 2430 }, 2431 { 2432 .type = QCOW2_FEAT_TYPE_COMPATIBLE, 2433 .bit = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR, 2434 .name = "lazy refcounts", 2435 }, 2436 }; 2437 2438 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE, 2439 features, sizeof(features), buflen); 2440 if (ret < 0) { 2441 goto fail; 2442 } 2443 buf += ret; 2444 buflen -= ret; 2445 } 2446 2447 /* Bitmap extension */ 2448 if (s->nb_bitmaps > 0) { 2449 Qcow2BitmapHeaderExt bitmaps_header = { 2450 .nb_bitmaps = cpu_to_be32(s->nb_bitmaps), 2451 .bitmap_directory_size = 2452 cpu_to_be64(s->bitmap_directory_size), 2453 .bitmap_directory_offset = 2454 cpu_to_be64(s->bitmap_directory_offset) 2455 }; 2456 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BITMAPS, 2457 &bitmaps_header, sizeof(bitmaps_header), 2458 buflen); 2459 if (ret < 0) { 2460 goto fail; 2461 } 2462 buf += ret; 2463 buflen -= ret; 2464 } 2465 2466 /* Keep unknown header extensions */ 2467 QLIST_FOREACH(uext, &s->unknown_header_ext, next) { 2468 ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen); 2469 if (ret < 0) { 2470 goto fail; 2471 } 2472 2473 buf += ret; 2474 buflen -= ret; 2475 } 2476 2477 /* End of header extensions */ 2478 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen); 2479 if (ret < 0) { 2480 goto fail; 2481 } 2482 2483 buf += ret; 2484 buflen -= ret; 2485 2486 /* Backing file name */ 2487 if (s->image_backing_file) { 2488 size_t backing_file_len = strlen(s->image_backing_file); 2489 2490 if (buflen < backing_file_len) { 2491 ret = -ENOSPC; 2492 goto fail; 2493 } 2494 2495 /* Using strncpy is ok here, since buf is not NUL-terminated. */ 2496 strncpy(buf, s->image_backing_file, buflen); 2497 2498 header->backing_file_offset = cpu_to_be64(buf - ((char*) header)); 2499 header->backing_file_size = cpu_to_be32(backing_file_len); 2500 } 2501 2502 /* Write the new header */ 2503 ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size); 2504 if (ret < 0) { 2505 goto fail; 2506 } 2507 2508 ret = 0; 2509 fail: 2510 qemu_vfree(header); 2511 return ret; 2512 } 2513 2514 static int qcow2_change_backing_file(BlockDriverState *bs, 2515 const char *backing_file, const char *backing_fmt) 2516 { 2517 BDRVQcow2State *s = bs->opaque; 2518 2519 if (backing_file && strlen(backing_file) > 1023) { 2520 return -EINVAL; 2521 } 2522 2523 pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file), 2524 backing_file ?: ""); 2525 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: ""); 2526 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: ""); 2527 2528 g_free(s->image_backing_file); 2529 g_free(s->image_backing_format); 2530 2531 s->image_backing_file = backing_file ? g_strdup(bs->backing_file) : NULL; 2532 s->image_backing_format = backing_fmt ? g_strdup(bs->backing_format) : NULL; 2533 2534 return qcow2_update_header(bs); 2535 } 2536 2537 static int qcow2_crypt_method_from_format(const char *encryptfmt) 2538 { 2539 if (g_str_equal(encryptfmt, "luks")) { 2540 return QCOW_CRYPT_LUKS; 2541 } else if (g_str_equal(encryptfmt, "aes")) { 2542 return QCOW_CRYPT_AES; 2543 } else { 2544 return -EINVAL; 2545 } 2546 } 2547 2548 static int qcow2_set_up_encryption(BlockDriverState *bs, 2549 QCryptoBlockCreateOptions *cryptoopts, 2550 Error **errp) 2551 { 2552 BDRVQcow2State *s = bs->opaque; 2553 QCryptoBlock *crypto = NULL; 2554 int fmt, ret; 2555 2556 switch (cryptoopts->format) { 2557 case Q_CRYPTO_BLOCK_FORMAT_LUKS: 2558 fmt = QCOW_CRYPT_LUKS; 2559 break; 2560 case Q_CRYPTO_BLOCK_FORMAT_QCOW: 2561 fmt = QCOW_CRYPT_AES; 2562 break; 2563 default: 2564 error_setg(errp, "Crypto format not supported in qcow2"); 2565 return -EINVAL; 2566 } 2567 2568 s->crypt_method_header = fmt; 2569 2570 crypto = qcrypto_block_create(cryptoopts, "encrypt.", 2571 qcow2_crypto_hdr_init_func, 2572 qcow2_crypto_hdr_write_func, 2573 bs, errp); 2574 if (!crypto) { 2575 return -EINVAL; 2576 } 2577 2578 ret = qcow2_update_header(bs); 2579 if (ret < 0) { 2580 error_setg_errno(errp, -ret, "Could not write encryption header"); 2581 goto out; 2582 } 2583 2584 ret = 0; 2585 out: 2586 qcrypto_block_free(crypto); 2587 return ret; 2588 } 2589 2590 /** 2591 * Preallocates metadata structures for data clusters between @offset (in the 2592 * guest disk) and @new_length (which is thus generally the new guest disk 2593 * size). 2594 * 2595 * Returns: 0 on success, -errno on failure. 2596 */ 2597 static int coroutine_fn preallocate_co(BlockDriverState *bs, uint64_t offset, 2598 uint64_t new_length) 2599 { 2600 uint64_t bytes; 2601 uint64_t host_offset = 0; 2602 unsigned int cur_bytes; 2603 int ret; 2604 QCowL2Meta *meta; 2605 2606 assert(offset <= new_length); 2607 bytes = new_length - offset; 2608 2609 while (bytes) { 2610 cur_bytes = MIN(bytes, INT_MAX); 2611 ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes, 2612 &host_offset, &meta); 2613 if (ret < 0) { 2614 return ret; 2615 } 2616 2617 while (meta) { 2618 QCowL2Meta *next = meta->next; 2619 2620 ret = qcow2_alloc_cluster_link_l2(bs, meta); 2621 if (ret < 0) { 2622 qcow2_free_any_clusters(bs, meta->alloc_offset, 2623 meta->nb_clusters, QCOW2_DISCARD_NEVER); 2624 return ret; 2625 } 2626 2627 /* There are no dependent requests, but we need to remove our 2628 * request from the list of in-flight requests */ 2629 QLIST_REMOVE(meta, next_in_flight); 2630 2631 g_free(meta); 2632 meta = next; 2633 } 2634 2635 /* TODO Preallocate data if requested */ 2636 2637 bytes -= cur_bytes; 2638 offset += cur_bytes; 2639 } 2640 2641 /* 2642 * It is expected that the image file is large enough to actually contain 2643 * all of the allocated clusters (otherwise we get failing reads after 2644 * EOF). Extend the image to the last allocated sector. 2645 */ 2646 if (host_offset != 0) { 2647 uint8_t data = 0; 2648 ret = bdrv_pwrite(bs->file, (host_offset + cur_bytes) - 1, 2649 &data, 1); 2650 if (ret < 0) { 2651 return ret; 2652 } 2653 } 2654 2655 return 0; 2656 } 2657 2658 /* qcow2_refcount_metadata_size: 2659 * @clusters: number of clusters to refcount (including data and L1/L2 tables) 2660 * @cluster_size: size of a cluster, in bytes 2661 * @refcount_order: refcount bits power-of-2 exponent 2662 * @generous_increase: allow for the refcount table to be 1.5x as large as it 2663 * needs to be 2664 * 2665 * Returns: Number of bytes required for refcount blocks and table metadata. 2666 */ 2667 int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size, 2668 int refcount_order, bool generous_increase, 2669 uint64_t *refblock_count) 2670 { 2671 /* 2672 * Every host cluster is reference-counted, including metadata (even 2673 * refcount metadata is recursively included). 2674 * 2675 * An accurate formula for the size of refcount metadata size is difficult 2676 * to derive. An easier method of calculation is finding the fixed point 2677 * where no further refcount blocks or table clusters are required to 2678 * reference count every cluster. 2679 */ 2680 int64_t blocks_per_table_cluster = cluster_size / sizeof(uint64_t); 2681 int64_t refcounts_per_block = cluster_size * 8 / (1 << refcount_order); 2682 int64_t table = 0; /* number of refcount table clusters */ 2683 int64_t blocks = 0; /* number of refcount block clusters */ 2684 int64_t last; 2685 int64_t n = 0; 2686 2687 do { 2688 last = n; 2689 blocks = DIV_ROUND_UP(clusters + table + blocks, refcounts_per_block); 2690 table = DIV_ROUND_UP(blocks, blocks_per_table_cluster); 2691 n = clusters + blocks + table; 2692 2693 if (n == last && generous_increase) { 2694 clusters += DIV_ROUND_UP(table, 2); 2695 n = 0; /* force another loop */ 2696 generous_increase = false; 2697 } 2698 } while (n != last); 2699 2700 if (refblock_count) { 2701 *refblock_count = blocks; 2702 } 2703 2704 return (blocks + table) * cluster_size; 2705 } 2706 2707 /** 2708 * qcow2_calc_prealloc_size: 2709 * @total_size: virtual disk size in bytes 2710 * @cluster_size: cluster size in bytes 2711 * @refcount_order: refcount bits power-of-2 exponent 2712 * 2713 * Returns: Total number of bytes required for the fully allocated image 2714 * (including metadata). 2715 */ 2716 static int64_t qcow2_calc_prealloc_size(int64_t total_size, 2717 size_t cluster_size, 2718 int refcount_order) 2719 { 2720 int64_t meta_size = 0; 2721 uint64_t nl1e, nl2e; 2722 int64_t aligned_total_size = ROUND_UP(total_size, cluster_size); 2723 2724 /* header: 1 cluster */ 2725 meta_size += cluster_size; 2726 2727 /* total size of L2 tables */ 2728 nl2e = aligned_total_size / cluster_size; 2729 nl2e = ROUND_UP(nl2e, cluster_size / sizeof(uint64_t)); 2730 meta_size += nl2e * sizeof(uint64_t); 2731 2732 /* total size of L1 tables */ 2733 nl1e = nl2e * sizeof(uint64_t) / cluster_size; 2734 nl1e = ROUND_UP(nl1e, cluster_size / sizeof(uint64_t)); 2735 meta_size += nl1e * sizeof(uint64_t); 2736 2737 /* total size of refcount table and blocks */ 2738 meta_size += qcow2_refcount_metadata_size( 2739 (meta_size + aligned_total_size) / cluster_size, 2740 cluster_size, refcount_order, false, NULL); 2741 2742 return meta_size + aligned_total_size; 2743 } 2744 2745 static bool validate_cluster_size(size_t cluster_size, Error **errp) 2746 { 2747 int cluster_bits = ctz32(cluster_size); 2748 if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS || 2749 (1 << cluster_bits) != cluster_size) 2750 { 2751 error_setg(errp, "Cluster size must be a power of two between %d and " 2752 "%dk", 1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10)); 2753 return false; 2754 } 2755 return true; 2756 } 2757 2758 static size_t qcow2_opt_get_cluster_size_del(QemuOpts *opts, Error **errp) 2759 { 2760 size_t cluster_size; 2761 2762 cluster_size = qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE, 2763 DEFAULT_CLUSTER_SIZE); 2764 if (!validate_cluster_size(cluster_size, errp)) { 2765 return 0; 2766 } 2767 return cluster_size; 2768 } 2769 2770 static int qcow2_opt_get_version_del(QemuOpts *opts, Error **errp) 2771 { 2772 char *buf; 2773 int ret; 2774 2775 buf = qemu_opt_get_del(opts, BLOCK_OPT_COMPAT_LEVEL); 2776 if (!buf) { 2777 ret = 3; /* default */ 2778 } else if (!strcmp(buf, "0.10")) { 2779 ret = 2; 2780 } else if (!strcmp(buf, "1.1")) { 2781 ret = 3; 2782 } else { 2783 error_setg(errp, "Invalid compatibility level: '%s'", buf); 2784 ret = -EINVAL; 2785 } 2786 g_free(buf); 2787 return ret; 2788 } 2789 2790 static uint64_t qcow2_opt_get_refcount_bits_del(QemuOpts *opts, int version, 2791 Error **errp) 2792 { 2793 uint64_t refcount_bits; 2794 2795 refcount_bits = qemu_opt_get_number_del(opts, BLOCK_OPT_REFCOUNT_BITS, 16); 2796 if (refcount_bits > 64 || !is_power_of_2(refcount_bits)) { 2797 error_setg(errp, "Refcount width must be a power of two and may not " 2798 "exceed 64 bits"); 2799 return 0; 2800 } 2801 2802 if (version < 3 && refcount_bits != 16) { 2803 error_setg(errp, "Different refcount widths than 16 bits require " 2804 "compatibility level 1.1 or above (use compat=1.1 or " 2805 "greater)"); 2806 return 0; 2807 } 2808 2809 return refcount_bits; 2810 } 2811 2812 static int coroutine_fn 2813 qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) 2814 { 2815 BlockdevCreateOptionsQcow2 *qcow2_opts; 2816 QDict *options; 2817 2818 /* 2819 * Open the image file and write a minimal qcow2 header. 2820 * 2821 * We keep things simple and start with a zero-sized image. We also 2822 * do without refcount blocks or a L1 table for now. We'll fix the 2823 * inconsistency later. 2824 * 2825 * We do need a refcount table because growing the refcount table means 2826 * allocating two new refcount blocks - the seconds of which would be at 2827 * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file 2828 * size for any qcow2 image. 2829 */ 2830 BlockBackend *blk = NULL; 2831 BlockDriverState *bs = NULL; 2832 QCowHeader *header; 2833 size_t cluster_size; 2834 int version; 2835 int refcount_order; 2836 uint64_t* refcount_table; 2837 Error *local_err = NULL; 2838 int ret; 2839 2840 assert(create_options->driver == BLOCKDEV_DRIVER_QCOW2); 2841 qcow2_opts = &create_options->u.qcow2; 2842 2843 bs = bdrv_open_blockdev_ref(qcow2_opts->file, errp); 2844 if (bs == NULL) { 2845 return -EIO; 2846 } 2847 2848 /* Validate options and set default values */ 2849 if (!QEMU_IS_ALIGNED(qcow2_opts->size, BDRV_SECTOR_SIZE)) { 2850 error_setg(errp, "Image size must be a multiple of 512 bytes"); 2851 ret = -EINVAL; 2852 goto out; 2853 } 2854 2855 if (qcow2_opts->has_version) { 2856 switch (qcow2_opts->version) { 2857 case BLOCKDEV_QCOW2_VERSION_V2: 2858 version = 2; 2859 break; 2860 case BLOCKDEV_QCOW2_VERSION_V3: 2861 version = 3; 2862 break; 2863 default: 2864 g_assert_not_reached(); 2865 } 2866 } else { 2867 version = 3; 2868 } 2869 2870 if (qcow2_opts->has_cluster_size) { 2871 cluster_size = qcow2_opts->cluster_size; 2872 } else { 2873 cluster_size = DEFAULT_CLUSTER_SIZE; 2874 } 2875 2876 if (!validate_cluster_size(cluster_size, errp)) { 2877 ret = -EINVAL; 2878 goto out; 2879 } 2880 2881 if (!qcow2_opts->has_preallocation) { 2882 qcow2_opts->preallocation = PREALLOC_MODE_OFF; 2883 } 2884 if (qcow2_opts->has_backing_file && 2885 qcow2_opts->preallocation != PREALLOC_MODE_OFF) 2886 { 2887 error_setg(errp, "Backing file and preallocation cannot be used at " 2888 "the same time"); 2889 ret = -EINVAL; 2890 goto out; 2891 } 2892 if (qcow2_opts->has_backing_fmt && !qcow2_opts->has_backing_file) { 2893 error_setg(errp, "Backing format cannot be used without backing file"); 2894 ret = -EINVAL; 2895 goto out; 2896 } 2897 2898 if (!qcow2_opts->has_lazy_refcounts) { 2899 qcow2_opts->lazy_refcounts = false; 2900 } 2901 if (version < 3 && qcow2_opts->lazy_refcounts) { 2902 error_setg(errp, "Lazy refcounts only supported with compatibility " 2903 "level 1.1 and above (use version=v3 or greater)"); 2904 ret = -EINVAL; 2905 goto out; 2906 } 2907 2908 if (!qcow2_opts->has_refcount_bits) { 2909 qcow2_opts->refcount_bits = 16; 2910 } 2911 if (qcow2_opts->refcount_bits > 64 || 2912 !is_power_of_2(qcow2_opts->refcount_bits)) 2913 { 2914 error_setg(errp, "Refcount width must be a power of two and may not " 2915 "exceed 64 bits"); 2916 ret = -EINVAL; 2917 goto out; 2918 } 2919 if (version < 3 && qcow2_opts->refcount_bits != 16) { 2920 error_setg(errp, "Different refcount widths than 16 bits require " 2921 "compatibility level 1.1 or above (use version=v3 or " 2922 "greater)"); 2923 ret = -EINVAL; 2924 goto out; 2925 } 2926 refcount_order = ctz32(qcow2_opts->refcount_bits); 2927 2928 2929 /* Create BlockBackend to write to the image */ 2930 blk = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL); 2931 ret = blk_insert_bs(blk, bs, errp); 2932 if (ret < 0) { 2933 goto out; 2934 } 2935 blk_set_allow_write_beyond_eof(blk, true); 2936 2937 /* Clear the protocol layer and preallocate it if necessary */ 2938 ret = blk_truncate(blk, 0, PREALLOC_MODE_OFF, errp); 2939 if (ret < 0) { 2940 goto out; 2941 } 2942 2943 if (qcow2_opts->preallocation == PREALLOC_MODE_FULL || 2944 qcow2_opts->preallocation == PREALLOC_MODE_FALLOC) 2945 { 2946 int64_t prealloc_size = 2947 qcow2_calc_prealloc_size(qcow2_opts->size, cluster_size, 2948 refcount_order); 2949 2950 ret = blk_truncate(blk, prealloc_size, qcow2_opts->preallocation, errp); 2951 if (ret < 0) { 2952 goto out; 2953 } 2954 } 2955 2956 /* Write the header */ 2957 QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS) < sizeof(*header)); 2958 header = g_malloc0(cluster_size); 2959 *header = (QCowHeader) { 2960 .magic = cpu_to_be32(QCOW_MAGIC), 2961 .version = cpu_to_be32(version), 2962 .cluster_bits = cpu_to_be32(ctz32(cluster_size)), 2963 .size = cpu_to_be64(0), 2964 .l1_table_offset = cpu_to_be64(0), 2965 .l1_size = cpu_to_be32(0), 2966 .refcount_table_offset = cpu_to_be64(cluster_size), 2967 .refcount_table_clusters = cpu_to_be32(1), 2968 .refcount_order = cpu_to_be32(refcount_order), 2969 .header_length = cpu_to_be32(sizeof(*header)), 2970 }; 2971 2972 /* We'll update this to correct value later */ 2973 header->crypt_method = cpu_to_be32(QCOW_CRYPT_NONE); 2974 2975 if (qcow2_opts->lazy_refcounts) { 2976 header->compatible_features |= 2977 cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS); 2978 } 2979 2980 ret = blk_pwrite(blk, 0, header, cluster_size, 0); 2981 g_free(header); 2982 if (ret < 0) { 2983 error_setg_errno(errp, -ret, "Could not write qcow2 header"); 2984 goto out; 2985 } 2986 2987 /* Write a refcount table with one refcount block */ 2988 refcount_table = g_malloc0(2 * cluster_size); 2989 refcount_table[0] = cpu_to_be64(2 * cluster_size); 2990 ret = blk_pwrite(blk, cluster_size, refcount_table, 2 * cluster_size, 0); 2991 g_free(refcount_table); 2992 2993 if (ret < 0) { 2994 error_setg_errno(errp, -ret, "Could not write refcount table"); 2995 goto out; 2996 } 2997 2998 blk_unref(blk); 2999 blk = NULL; 3000 3001 /* 3002 * And now open the image and make it consistent first (i.e. increase the 3003 * refcount of the cluster that is occupied by the header and the refcount 3004 * table) 3005 */ 3006 options = qdict_new(); 3007 qdict_put_str(options, "driver", "qcow2"); 3008 qdict_put_str(options, "file", bs->node_name); 3009 blk = blk_new_open(NULL, NULL, options, 3010 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_NO_FLUSH, 3011 &local_err); 3012 if (blk == NULL) { 3013 error_propagate(errp, local_err); 3014 ret = -EIO; 3015 goto out; 3016 } 3017 3018 ret = qcow2_alloc_clusters(blk_bs(blk), 3 * cluster_size); 3019 if (ret < 0) { 3020 error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 " 3021 "header and refcount table"); 3022 goto out; 3023 3024 } else if (ret != 0) { 3025 error_report("Huh, first cluster in empty image is already in use?"); 3026 abort(); 3027 } 3028 3029 /* Create a full header (including things like feature table) */ 3030 ret = qcow2_update_header(blk_bs(blk)); 3031 if (ret < 0) { 3032 error_setg_errno(errp, -ret, "Could not update qcow2 header"); 3033 goto out; 3034 } 3035 3036 /* Okay, now that we have a valid image, let's give it the right size */ 3037 ret = blk_truncate(blk, qcow2_opts->size, PREALLOC_MODE_OFF, errp); 3038 if (ret < 0) { 3039 error_prepend(errp, "Could not resize image: "); 3040 goto out; 3041 } 3042 3043 /* Want a backing file? There you go.*/ 3044 if (qcow2_opts->has_backing_file) { 3045 const char *backing_format = NULL; 3046 3047 if (qcow2_opts->has_backing_fmt) { 3048 backing_format = BlockdevDriver_str(qcow2_opts->backing_fmt); 3049 } 3050 3051 ret = bdrv_change_backing_file(blk_bs(blk), qcow2_opts->backing_file, 3052 backing_format); 3053 if (ret < 0) { 3054 error_setg_errno(errp, -ret, "Could not assign backing file '%s' " 3055 "with format '%s'", qcow2_opts->backing_file, 3056 backing_format); 3057 goto out; 3058 } 3059 } 3060 3061 /* Want encryption? There you go. */ 3062 if (qcow2_opts->has_encrypt) { 3063 ret = qcow2_set_up_encryption(blk_bs(blk), qcow2_opts->encrypt, errp); 3064 if (ret < 0) { 3065 goto out; 3066 } 3067 } 3068 3069 /* And if we're supposed to preallocate metadata, do that now */ 3070 if (qcow2_opts->preallocation != PREALLOC_MODE_OFF) { 3071 BDRVQcow2State *s = blk_bs(blk)->opaque; 3072 qemu_co_mutex_lock(&s->lock); 3073 ret = preallocate_co(blk_bs(blk), 0, qcow2_opts->size); 3074 qemu_co_mutex_unlock(&s->lock); 3075 3076 if (ret < 0) { 3077 error_setg_errno(errp, -ret, "Could not preallocate metadata"); 3078 goto out; 3079 } 3080 } 3081 3082 blk_unref(blk); 3083 blk = NULL; 3084 3085 /* Reopen the image without BDRV_O_NO_FLUSH to flush it before returning. 3086 * Using BDRV_O_NO_IO, since encryption is now setup we don't want to 3087 * have to setup decryption context. We're not doing any I/O on the top 3088 * level BlockDriverState, only lower layers, where BDRV_O_NO_IO does 3089 * not have effect. 3090 */ 3091 options = qdict_new(); 3092 qdict_put_str(options, "driver", "qcow2"); 3093 qdict_put_str(options, "file", bs->node_name); 3094 blk = blk_new_open(NULL, NULL, options, 3095 BDRV_O_RDWR | BDRV_O_NO_BACKING | BDRV_O_NO_IO, 3096 &local_err); 3097 if (blk == NULL) { 3098 error_propagate(errp, local_err); 3099 ret = -EIO; 3100 goto out; 3101 } 3102 3103 ret = 0; 3104 out: 3105 blk_unref(blk); 3106 bdrv_unref(bs); 3107 return ret; 3108 } 3109 3110 static int coroutine_fn qcow2_co_create_opts(const char *filename, QemuOpts *opts, 3111 Error **errp) 3112 { 3113 BlockdevCreateOptions *create_options = NULL; 3114 QDict *qdict; 3115 Visitor *v; 3116 BlockDriverState *bs = NULL; 3117 Error *local_err = NULL; 3118 const char *val; 3119 int ret; 3120 3121 /* Only the keyval visitor supports the dotted syntax needed for 3122 * encryption, so go through a QDict before getting a QAPI type. Ignore 3123 * options meant for the protocol layer so that the visitor doesn't 3124 * complain. */ 3125 qdict = qemu_opts_to_qdict_filtered(opts, NULL, bdrv_qcow2.create_opts, 3126 true); 3127 3128 /* Handle encryption options */ 3129 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT); 3130 if (val && !strcmp(val, "on")) { 3131 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT, "qcow"); 3132 } else if (val && !strcmp(val, "off")) { 3133 qdict_del(qdict, BLOCK_OPT_ENCRYPT); 3134 } 3135 3136 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT); 3137 if (val && !strcmp(val, "aes")) { 3138 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT, "qcow"); 3139 } 3140 3141 /* Convert compat=0.10/1.1 into compat=v2/v3, to be renamed into 3142 * version=v2/v3 below. */ 3143 val = qdict_get_try_str(qdict, BLOCK_OPT_COMPAT_LEVEL); 3144 if (val && !strcmp(val, "0.10")) { 3145 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v2"); 3146 } else if (val && !strcmp(val, "1.1")) { 3147 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v3"); 3148 } 3149 3150 /* Change legacy command line options into QMP ones */ 3151 static const QDictRenames opt_renames[] = { 3152 { BLOCK_OPT_BACKING_FILE, "backing-file" }, 3153 { BLOCK_OPT_BACKING_FMT, "backing-fmt" }, 3154 { BLOCK_OPT_CLUSTER_SIZE, "cluster-size" }, 3155 { BLOCK_OPT_LAZY_REFCOUNTS, "lazy-refcounts" }, 3156 { BLOCK_OPT_REFCOUNT_BITS, "refcount-bits" }, 3157 { BLOCK_OPT_ENCRYPT, BLOCK_OPT_ENCRYPT_FORMAT }, 3158 { BLOCK_OPT_COMPAT_LEVEL, "version" }, 3159 { NULL, NULL }, 3160 }; 3161 3162 if (!qdict_rename_keys(qdict, opt_renames, errp)) { 3163 ret = -EINVAL; 3164 goto finish; 3165 } 3166 3167 /* Create and open the file (protocol layer) */ 3168 ret = bdrv_create_file(filename, opts, errp); 3169 if (ret < 0) { 3170 goto finish; 3171 } 3172 3173 bs = bdrv_open(filename, NULL, NULL, 3174 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp); 3175 if (bs == NULL) { 3176 ret = -EIO; 3177 goto finish; 3178 } 3179 3180 /* Set 'driver' and 'node' options */ 3181 qdict_put_str(qdict, "driver", "qcow2"); 3182 qdict_put_str(qdict, "file", bs->node_name); 3183 3184 /* Now get the QAPI type BlockdevCreateOptions */ 3185 v = qobject_input_visitor_new_flat_confused(qdict, errp); 3186 if (!v) { 3187 ret = -EINVAL; 3188 goto finish; 3189 } 3190 3191 visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err); 3192 visit_free(v); 3193 3194 if (local_err) { 3195 error_propagate(errp, local_err); 3196 ret = -EINVAL; 3197 goto finish; 3198 } 3199 3200 /* Silently round up size */ 3201 create_options->u.qcow2.size = ROUND_UP(create_options->u.qcow2.size, 3202 BDRV_SECTOR_SIZE); 3203 3204 /* Create the qcow2 image (format layer) */ 3205 ret = qcow2_co_create(create_options, errp); 3206 if (ret < 0) { 3207 goto finish; 3208 } 3209 3210 ret = 0; 3211 finish: 3212 qobject_unref(qdict); 3213 bdrv_unref(bs); 3214 qapi_free_BlockdevCreateOptions(create_options); 3215 return ret; 3216 } 3217 3218 3219 static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes) 3220 { 3221 int64_t nr; 3222 int res; 3223 3224 /* Clamp to image length, before checking status of underlying sectors */ 3225 if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) { 3226 bytes = bs->total_sectors * BDRV_SECTOR_SIZE - offset; 3227 } 3228 3229 if (!bytes) { 3230 return true; 3231 } 3232 res = bdrv_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL); 3233 return res >= 0 && (res & BDRV_BLOCK_ZERO) && nr == bytes; 3234 } 3235 3236 static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs, 3237 int64_t offset, int bytes, BdrvRequestFlags flags) 3238 { 3239 int ret; 3240 BDRVQcow2State *s = bs->opaque; 3241 3242 uint32_t head = offset % s->cluster_size; 3243 uint32_t tail = (offset + bytes) % s->cluster_size; 3244 3245 trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, bytes); 3246 if (offset + bytes == bs->total_sectors * BDRV_SECTOR_SIZE) { 3247 tail = 0; 3248 } 3249 3250 if (head || tail) { 3251 uint64_t off; 3252 unsigned int nr; 3253 3254 assert(head + bytes <= s->cluster_size); 3255 3256 /* check whether remainder of cluster already reads as zero */ 3257 if (!(is_zero(bs, offset - head, head) && 3258 is_zero(bs, offset + bytes, 3259 tail ? s->cluster_size - tail : 0))) { 3260 return -ENOTSUP; 3261 } 3262 3263 qemu_co_mutex_lock(&s->lock); 3264 /* We can have new write after previous check */ 3265 offset = QEMU_ALIGN_DOWN(offset, s->cluster_size); 3266 bytes = s->cluster_size; 3267 nr = s->cluster_size; 3268 ret = qcow2_get_cluster_offset(bs, offset, &nr, &off); 3269 if (ret != QCOW2_CLUSTER_UNALLOCATED && 3270 ret != QCOW2_CLUSTER_ZERO_PLAIN && 3271 ret != QCOW2_CLUSTER_ZERO_ALLOC) { 3272 qemu_co_mutex_unlock(&s->lock); 3273 return -ENOTSUP; 3274 } 3275 } else { 3276 qemu_co_mutex_lock(&s->lock); 3277 } 3278 3279 trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, bytes); 3280 3281 /* Whatever is left can use real zero clusters */ 3282 ret = qcow2_cluster_zeroize(bs, offset, bytes, flags); 3283 qemu_co_mutex_unlock(&s->lock); 3284 3285 return ret; 3286 } 3287 3288 static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs, 3289 int64_t offset, int bytes) 3290 { 3291 int ret; 3292 BDRVQcow2State *s = bs->opaque; 3293 3294 if (!QEMU_IS_ALIGNED(offset | bytes, s->cluster_size)) { 3295 assert(bytes < s->cluster_size); 3296 /* Ignore partial clusters, except for the special case of the 3297 * complete partial cluster at the end of an unaligned file */ 3298 if (!QEMU_IS_ALIGNED(offset, s->cluster_size) || 3299 offset + bytes != bs->total_sectors * BDRV_SECTOR_SIZE) { 3300 return -ENOTSUP; 3301 } 3302 } 3303 3304 qemu_co_mutex_lock(&s->lock); 3305 ret = qcow2_cluster_discard(bs, offset, bytes, QCOW2_DISCARD_REQUEST, 3306 false); 3307 qemu_co_mutex_unlock(&s->lock); 3308 return ret; 3309 } 3310 3311 static int coroutine_fn 3312 qcow2_co_copy_range_from(BlockDriverState *bs, 3313 BdrvChild *src, uint64_t src_offset, 3314 BdrvChild *dst, uint64_t dst_offset, 3315 uint64_t bytes, BdrvRequestFlags read_flags, 3316 BdrvRequestFlags write_flags) 3317 { 3318 BDRVQcow2State *s = bs->opaque; 3319 int ret; 3320 unsigned int cur_bytes; /* number of bytes in current iteration */ 3321 BdrvChild *child = NULL; 3322 BdrvRequestFlags cur_write_flags; 3323 3324 assert(!bs->encrypted); 3325 qemu_co_mutex_lock(&s->lock); 3326 3327 while (bytes != 0) { 3328 uint64_t copy_offset = 0; 3329 /* prepare next request */ 3330 cur_bytes = MIN(bytes, INT_MAX); 3331 cur_write_flags = write_flags; 3332 3333 ret = qcow2_get_cluster_offset(bs, src_offset, &cur_bytes, ©_offset); 3334 if (ret < 0) { 3335 goto out; 3336 } 3337 3338 switch (ret) { 3339 case QCOW2_CLUSTER_UNALLOCATED: 3340 if (bs->backing && bs->backing->bs) { 3341 int64_t backing_length = bdrv_getlength(bs->backing->bs); 3342 if (src_offset >= backing_length) { 3343 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 3344 } else { 3345 child = bs->backing; 3346 cur_bytes = MIN(cur_bytes, backing_length - src_offset); 3347 copy_offset = src_offset; 3348 } 3349 } else { 3350 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 3351 } 3352 break; 3353 3354 case QCOW2_CLUSTER_ZERO_PLAIN: 3355 case QCOW2_CLUSTER_ZERO_ALLOC: 3356 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 3357 break; 3358 3359 case QCOW2_CLUSTER_COMPRESSED: 3360 ret = -ENOTSUP; 3361 goto out; 3362 3363 case QCOW2_CLUSTER_NORMAL: 3364 child = bs->file; 3365 copy_offset += offset_into_cluster(s, src_offset); 3366 if ((copy_offset & 511) != 0) { 3367 ret = -EIO; 3368 goto out; 3369 } 3370 break; 3371 3372 default: 3373 abort(); 3374 } 3375 qemu_co_mutex_unlock(&s->lock); 3376 ret = bdrv_co_copy_range_from(child, 3377 copy_offset, 3378 dst, dst_offset, 3379 cur_bytes, read_flags, cur_write_flags); 3380 qemu_co_mutex_lock(&s->lock); 3381 if (ret < 0) { 3382 goto out; 3383 } 3384 3385 bytes -= cur_bytes; 3386 src_offset += cur_bytes; 3387 dst_offset += cur_bytes; 3388 } 3389 ret = 0; 3390 3391 out: 3392 qemu_co_mutex_unlock(&s->lock); 3393 return ret; 3394 } 3395 3396 static int coroutine_fn 3397 qcow2_co_copy_range_to(BlockDriverState *bs, 3398 BdrvChild *src, uint64_t src_offset, 3399 BdrvChild *dst, uint64_t dst_offset, 3400 uint64_t bytes, BdrvRequestFlags read_flags, 3401 BdrvRequestFlags write_flags) 3402 { 3403 BDRVQcow2State *s = bs->opaque; 3404 int offset_in_cluster; 3405 int ret; 3406 unsigned int cur_bytes; /* number of sectors in current iteration */ 3407 uint64_t cluster_offset; 3408 QCowL2Meta *l2meta = NULL; 3409 3410 assert(!bs->encrypted); 3411 3412 qemu_co_mutex_lock(&s->lock); 3413 3414 while (bytes != 0) { 3415 3416 l2meta = NULL; 3417 3418 offset_in_cluster = offset_into_cluster(s, dst_offset); 3419 cur_bytes = MIN(bytes, INT_MAX); 3420 3421 /* TODO: 3422 * If src->bs == dst->bs, we could simply copy by incrementing 3423 * the refcnt, without copying user data. 3424 * Or if src->bs == dst->bs->backing->bs, we could copy by discarding. */ 3425 ret = qcow2_alloc_cluster_offset(bs, dst_offset, &cur_bytes, 3426 &cluster_offset, &l2meta); 3427 if (ret < 0) { 3428 goto fail; 3429 } 3430 3431 assert((cluster_offset & 511) == 0); 3432 3433 ret = qcow2_pre_write_overlap_check(bs, 0, 3434 cluster_offset + offset_in_cluster, cur_bytes); 3435 if (ret < 0) { 3436 goto fail; 3437 } 3438 3439 qemu_co_mutex_unlock(&s->lock); 3440 ret = bdrv_co_copy_range_to(src, src_offset, 3441 bs->file, 3442 cluster_offset + offset_in_cluster, 3443 cur_bytes, read_flags, write_flags); 3444 qemu_co_mutex_lock(&s->lock); 3445 if (ret < 0) { 3446 goto fail; 3447 } 3448 3449 ret = qcow2_handle_l2meta(bs, &l2meta, true); 3450 if (ret) { 3451 goto fail; 3452 } 3453 3454 bytes -= cur_bytes; 3455 src_offset += cur_bytes; 3456 dst_offset += cur_bytes; 3457 } 3458 ret = 0; 3459 3460 fail: 3461 qcow2_handle_l2meta(bs, &l2meta, false); 3462 3463 qemu_co_mutex_unlock(&s->lock); 3464 3465 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); 3466 3467 return ret; 3468 } 3469 3470 static int coroutine_fn qcow2_co_truncate(BlockDriverState *bs, int64_t offset, 3471 PreallocMode prealloc, Error **errp) 3472 { 3473 BDRVQcow2State *s = bs->opaque; 3474 uint64_t old_length; 3475 int64_t new_l1_size; 3476 int ret; 3477 QDict *options; 3478 3479 if (prealloc != PREALLOC_MODE_OFF && prealloc != PREALLOC_MODE_METADATA && 3480 prealloc != PREALLOC_MODE_FALLOC && prealloc != PREALLOC_MODE_FULL) 3481 { 3482 error_setg(errp, "Unsupported preallocation mode '%s'", 3483 PreallocMode_str(prealloc)); 3484 return -ENOTSUP; 3485 } 3486 3487 if (offset & 511) { 3488 error_setg(errp, "The new size must be a multiple of 512"); 3489 return -EINVAL; 3490 } 3491 3492 qemu_co_mutex_lock(&s->lock); 3493 3494 /* cannot proceed if image has snapshots */ 3495 if (s->nb_snapshots) { 3496 error_setg(errp, "Can't resize an image which has snapshots"); 3497 ret = -ENOTSUP; 3498 goto fail; 3499 } 3500 3501 /* cannot proceed if image has bitmaps */ 3502 if (s->nb_bitmaps) { 3503 /* TODO: resize bitmaps in the image */ 3504 error_setg(errp, "Can't resize an image which has bitmaps"); 3505 ret = -ENOTSUP; 3506 goto fail; 3507 } 3508 3509 old_length = bs->total_sectors * BDRV_SECTOR_SIZE; 3510 new_l1_size = size_to_l1(s, offset); 3511 3512 if (offset < old_length) { 3513 int64_t last_cluster, old_file_size; 3514 if (prealloc != PREALLOC_MODE_OFF) { 3515 error_setg(errp, 3516 "Preallocation can't be used for shrinking an image"); 3517 ret = -EINVAL; 3518 goto fail; 3519 } 3520 3521 ret = qcow2_cluster_discard(bs, ROUND_UP(offset, s->cluster_size), 3522 old_length - ROUND_UP(offset, 3523 s->cluster_size), 3524 QCOW2_DISCARD_ALWAYS, true); 3525 if (ret < 0) { 3526 error_setg_errno(errp, -ret, "Failed to discard cropped clusters"); 3527 goto fail; 3528 } 3529 3530 ret = qcow2_shrink_l1_table(bs, new_l1_size); 3531 if (ret < 0) { 3532 error_setg_errno(errp, -ret, 3533 "Failed to reduce the number of L2 tables"); 3534 goto fail; 3535 } 3536 3537 ret = qcow2_shrink_reftable(bs); 3538 if (ret < 0) { 3539 error_setg_errno(errp, -ret, 3540 "Failed to discard unused refblocks"); 3541 goto fail; 3542 } 3543 3544 old_file_size = bdrv_getlength(bs->file->bs); 3545 if (old_file_size < 0) { 3546 error_setg_errno(errp, -old_file_size, 3547 "Failed to inquire current file length"); 3548 ret = old_file_size; 3549 goto fail; 3550 } 3551 last_cluster = qcow2_get_last_cluster(bs, old_file_size); 3552 if (last_cluster < 0) { 3553 error_setg_errno(errp, -last_cluster, 3554 "Failed to find the last cluster"); 3555 ret = last_cluster; 3556 goto fail; 3557 } 3558 if ((last_cluster + 1) * s->cluster_size < old_file_size) { 3559 Error *local_err = NULL; 3560 3561 bdrv_co_truncate(bs->file, (last_cluster + 1) * s->cluster_size, 3562 PREALLOC_MODE_OFF, &local_err); 3563 if (local_err) { 3564 warn_reportf_err(local_err, 3565 "Failed to truncate the tail of the image: "); 3566 } 3567 } 3568 } else { 3569 ret = qcow2_grow_l1_table(bs, new_l1_size, true); 3570 if (ret < 0) { 3571 error_setg_errno(errp, -ret, "Failed to grow the L1 table"); 3572 goto fail; 3573 } 3574 } 3575 3576 switch (prealloc) { 3577 case PREALLOC_MODE_OFF: 3578 break; 3579 3580 case PREALLOC_MODE_METADATA: 3581 ret = preallocate_co(bs, old_length, offset); 3582 if (ret < 0) { 3583 error_setg_errno(errp, -ret, "Preallocation failed"); 3584 goto fail; 3585 } 3586 break; 3587 3588 case PREALLOC_MODE_FALLOC: 3589 case PREALLOC_MODE_FULL: 3590 { 3591 int64_t allocation_start, host_offset, guest_offset; 3592 int64_t clusters_allocated; 3593 int64_t old_file_size, new_file_size; 3594 uint64_t nb_new_data_clusters, nb_new_l2_tables; 3595 3596 old_file_size = bdrv_getlength(bs->file->bs); 3597 if (old_file_size < 0) { 3598 error_setg_errno(errp, -old_file_size, 3599 "Failed to inquire current file length"); 3600 ret = old_file_size; 3601 goto fail; 3602 } 3603 old_file_size = ROUND_UP(old_file_size, s->cluster_size); 3604 3605 nb_new_data_clusters = DIV_ROUND_UP(offset - old_length, 3606 s->cluster_size); 3607 3608 /* This is an overestimation; we will not actually allocate space for 3609 * these in the file but just make sure the new refcount structures are 3610 * able to cover them so we will not have to allocate new refblocks 3611 * while entering the data blocks in the potentially new L2 tables. 3612 * (We do not actually care where the L2 tables are placed. Maybe they 3613 * are already allocated or they can be placed somewhere before 3614 * @old_file_size. It does not matter because they will be fully 3615 * allocated automatically, so they do not need to be covered by the 3616 * preallocation. All that matters is that we will not have to allocate 3617 * new refcount structures for them.) */ 3618 nb_new_l2_tables = DIV_ROUND_UP(nb_new_data_clusters, 3619 s->cluster_size / sizeof(uint64_t)); 3620 /* The cluster range may not be aligned to L2 boundaries, so add one L2 3621 * table for a potential head/tail */ 3622 nb_new_l2_tables++; 3623 3624 allocation_start = qcow2_refcount_area(bs, old_file_size, 3625 nb_new_data_clusters + 3626 nb_new_l2_tables, 3627 true, 0, 0); 3628 if (allocation_start < 0) { 3629 error_setg_errno(errp, -allocation_start, 3630 "Failed to resize refcount structures"); 3631 ret = allocation_start; 3632 goto fail; 3633 } 3634 3635 clusters_allocated = qcow2_alloc_clusters_at(bs, allocation_start, 3636 nb_new_data_clusters); 3637 if (clusters_allocated < 0) { 3638 error_setg_errno(errp, -clusters_allocated, 3639 "Failed to allocate data clusters"); 3640 ret = clusters_allocated; 3641 goto fail; 3642 } 3643 3644 assert(clusters_allocated == nb_new_data_clusters); 3645 3646 /* Allocate the data area */ 3647 new_file_size = allocation_start + 3648 nb_new_data_clusters * s->cluster_size; 3649 ret = bdrv_co_truncate(bs->file, new_file_size, prealloc, errp); 3650 if (ret < 0) { 3651 error_prepend(errp, "Failed to resize underlying file: "); 3652 qcow2_free_clusters(bs, allocation_start, 3653 nb_new_data_clusters * s->cluster_size, 3654 QCOW2_DISCARD_OTHER); 3655 goto fail; 3656 } 3657 3658 /* Create the necessary L2 entries */ 3659 host_offset = allocation_start; 3660 guest_offset = old_length; 3661 while (nb_new_data_clusters) { 3662 int64_t nb_clusters = MIN( 3663 nb_new_data_clusters, 3664 s->l2_slice_size - offset_to_l2_slice_index(s, guest_offset)); 3665 QCowL2Meta allocation = { 3666 .offset = guest_offset, 3667 .alloc_offset = host_offset, 3668 .nb_clusters = nb_clusters, 3669 }; 3670 qemu_co_queue_init(&allocation.dependent_requests); 3671 3672 ret = qcow2_alloc_cluster_link_l2(bs, &allocation); 3673 if (ret < 0) { 3674 error_setg_errno(errp, -ret, "Failed to update L2 tables"); 3675 qcow2_free_clusters(bs, host_offset, 3676 nb_new_data_clusters * s->cluster_size, 3677 QCOW2_DISCARD_OTHER); 3678 goto fail; 3679 } 3680 3681 guest_offset += nb_clusters * s->cluster_size; 3682 host_offset += nb_clusters * s->cluster_size; 3683 nb_new_data_clusters -= nb_clusters; 3684 } 3685 break; 3686 } 3687 3688 default: 3689 g_assert_not_reached(); 3690 } 3691 3692 if (prealloc != PREALLOC_MODE_OFF) { 3693 /* Flush metadata before actually changing the image size */ 3694 ret = qcow2_write_caches(bs); 3695 if (ret < 0) { 3696 error_setg_errno(errp, -ret, 3697 "Failed to flush the preallocated area to disk"); 3698 goto fail; 3699 } 3700 } 3701 3702 bs->total_sectors = offset / BDRV_SECTOR_SIZE; 3703 3704 /* write updated header.size */ 3705 offset = cpu_to_be64(offset); 3706 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size), 3707 &offset, sizeof(uint64_t)); 3708 if (ret < 0) { 3709 error_setg_errno(errp, -ret, "Failed to update the image size"); 3710 goto fail; 3711 } 3712 3713 s->l1_vm_state_index = new_l1_size; 3714 3715 /* Update cache sizes */ 3716 options = qdict_clone_shallow(bs->options); 3717 ret = qcow2_update_options(bs, options, s->flags, errp); 3718 qobject_unref(options); 3719 if (ret < 0) { 3720 goto fail; 3721 } 3722 ret = 0; 3723 fail: 3724 qemu_co_mutex_unlock(&s->lock); 3725 return ret; 3726 } 3727 3728 /* 3729 * qcow2_compress() 3730 * 3731 * @dest - destination buffer, @dest_size bytes 3732 * @src - source buffer, @src_size bytes 3733 * 3734 * Returns: compressed size on success 3735 * -1 destination buffer is not enough to store compressed data 3736 * -2 on any other error 3737 */ 3738 static ssize_t qcow2_compress(void *dest, size_t dest_size, 3739 const void *src, size_t src_size) 3740 { 3741 ssize_t ret; 3742 z_stream strm; 3743 3744 /* best compression, small window, no zlib header */ 3745 memset(&strm, 0, sizeof(strm)); 3746 ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 3747 -12, 9, Z_DEFAULT_STRATEGY); 3748 if (ret != Z_OK) { 3749 return -2; 3750 } 3751 3752 /* strm.next_in is not const in old zlib versions, such as those used on 3753 * OpenBSD/NetBSD, so cast the const away */ 3754 strm.avail_in = src_size; 3755 strm.next_in = (void *) src; 3756 strm.avail_out = dest_size; 3757 strm.next_out = dest; 3758 3759 ret = deflate(&strm, Z_FINISH); 3760 if (ret == Z_STREAM_END) { 3761 ret = dest_size - strm.avail_out; 3762 } else { 3763 ret = (ret == Z_OK ? -1 : -2); 3764 } 3765 3766 deflateEnd(&strm); 3767 3768 return ret; 3769 } 3770 3771 /* 3772 * qcow2_decompress() 3773 * 3774 * Decompress some data (not more than @src_size bytes) to produce exactly 3775 * @dest_size bytes. 3776 * 3777 * @dest - destination buffer, @dest_size bytes 3778 * @src - source buffer, @src_size bytes 3779 * 3780 * Returns: 0 on success 3781 * -1 on fail 3782 */ 3783 static ssize_t qcow2_decompress(void *dest, size_t dest_size, 3784 const void *src, size_t src_size) 3785 { 3786 int ret = 0; 3787 z_stream strm; 3788 3789 memset(&strm, 0, sizeof(strm)); 3790 strm.avail_in = src_size; 3791 strm.next_in = (void *) src; 3792 strm.avail_out = dest_size; 3793 strm.next_out = dest; 3794 3795 ret = inflateInit2(&strm, -12); 3796 if (ret != Z_OK) { 3797 return -1; 3798 } 3799 3800 ret = inflate(&strm, Z_FINISH); 3801 if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) || strm.avail_out != 0) { 3802 /* We approve Z_BUF_ERROR because we need @dest buffer to be filled, but 3803 * @src buffer may be processed partly (because in qcow2 we know size of 3804 * compressed data with precision of one sector) */ 3805 ret = -1; 3806 } 3807 3808 inflateEnd(&strm); 3809 3810 return ret; 3811 } 3812 3813 #define MAX_COMPRESS_THREADS 4 3814 3815 typedef ssize_t (*Qcow2CompressFunc)(void *dest, size_t dest_size, 3816 const void *src, size_t src_size); 3817 typedef struct Qcow2CompressData { 3818 void *dest; 3819 size_t dest_size; 3820 const void *src; 3821 size_t src_size; 3822 ssize_t ret; 3823 3824 Qcow2CompressFunc func; 3825 } Qcow2CompressData; 3826 3827 static int qcow2_compress_pool_func(void *opaque) 3828 { 3829 Qcow2CompressData *data = opaque; 3830 3831 data->ret = data->func(data->dest, data->dest_size, 3832 data->src, data->src_size); 3833 3834 return 0; 3835 } 3836 3837 static void qcow2_compress_complete(void *opaque, int ret) 3838 { 3839 qemu_coroutine_enter(opaque); 3840 } 3841 3842 static ssize_t coroutine_fn 3843 qcow2_co_do_compress(BlockDriverState *bs, void *dest, size_t dest_size, 3844 const void *src, size_t src_size, Qcow2CompressFunc func) 3845 { 3846 BDRVQcow2State *s = bs->opaque; 3847 BlockAIOCB *acb; 3848 ThreadPool *pool = aio_get_thread_pool(bdrv_get_aio_context(bs)); 3849 Qcow2CompressData arg = { 3850 .dest = dest, 3851 .dest_size = dest_size, 3852 .src = src, 3853 .src_size = src_size, 3854 .func = func, 3855 }; 3856 3857 while (s->nb_compress_threads >= MAX_COMPRESS_THREADS) { 3858 qemu_co_queue_wait(&s->compress_wait_queue, NULL); 3859 } 3860 3861 s->nb_compress_threads++; 3862 acb = thread_pool_submit_aio(pool, qcow2_compress_pool_func, &arg, 3863 qcow2_compress_complete, 3864 qemu_coroutine_self()); 3865 3866 if (!acb) { 3867 s->nb_compress_threads--; 3868 return -EINVAL; 3869 } 3870 qemu_coroutine_yield(); 3871 s->nb_compress_threads--; 3872 qemu_co_queue_next(&s->compress_wait_queue); 3873 3874 return arg.ret; 3875 } 3876 3877 static ssize_t coroutine_fn 3878 qcow2_co_compress(BlockDriverState *bs, void *dest, size_t dest_size, 3879 const void *src, size_t src_size) 3880 { 3881 return qcow2_co_do_compress(bs, dest, dest_size, src, src_size, 3882 qcow2_compress); 3883 } 3884 3885 static ssize_t coroutine_fn 3886 qcow2_co_decompress(BlockDriverState *bs, void *dest, size_t dest_size, 3887 const void *src, size_t src_size) 3888 { 3889 return qcow2_co_do_compress(bs, dest, dest_size, src, src_size, 3890 qcow2_decompress); 3891 } 3892 3893 /* XXX: put compressed sectors first, then all the cluster aligned 3894 tables to avoid losing bytes in alignment */ 3895 static coroutine_fn int 3896 qcow2_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset, 3897 uint64_t bytes, QEMUIOVector *qiov) 3898 { 3899 BDRVQcow2State *s = bs->opaque; 3900 QEMUIOVector hd_qiov; 3901 int ret; 3902 size_t out_len; 3903 uint8_t *buf, *out_buf; 3904 int64_t cluster_offset; 3905 3906 if (bytes == 0) { 3907 /* align end of file to a sector boundary to ease reading with 3908 sector based I/Os */ 3909 cluster_offset = bdrv_getlength(bs->file->bs); 3910 if (cluster_offset < 0) { 3911 return cluster_offset; 3912 } 3913 return bdrv_co_truncate(bs->file, cluster_offset, PREALLOC_MODE_OFF, 3914 NULL); 3915 } 3916 3917 if (offset_into_cluster(s, offset)) { 3918 return -EINVAL; 3919 } 3920 3921 buf = qemu_blockalign(bs, s->cluster_size); 3922 if (bytes != s->cluster_size) { 3923 if (bytes > s->cluster_size || 3924 offset + bytes != bs->total_sectors << BDRV_SECTOR_BITS) 3925 { 3926 qemu_vfree(buf); 3927 return -EINVAL; 3928 } 3929 /* Zero-pad last write if image size is not cluster aligned */ 3930 memset(buf + bytes, 0, s->cluster_size - bytes); 3931 } 3932 qemu_iovec_to_buf(qiov, 0, buf, bytes); 3933 3934 out_buf = g_malloc(s->cluster_size); 3935 3936 out_len = qcow2_co_compress(bs, out_buf, s->cluster_size - 1, 3937 buf, s->cluster_size); 3938 if (out_len == -2) { 3939 ret = -EINVAL; 3940 goto fail; 3941 } else if (out_len == -1) { 3942 /* could not compress: write normal cluster */ 3943 ret = qcow2_co_pwritev(bs, offset, bytes, qiov, 0); 3944 if (ret < 0) { 3945 goto fail; 3946 } 3947 goto success; 3948 } 3949 3950 qemu_co_mutex_lock(&s->lock); 3951 cluster_offset = 3952 qcow2_alloc_compressed_cluster_offset(bs, offset, out_len); 3953 if (!cluster_offset) { 3954 qemu_co_mutex_unlock(&s->lock); 3955 ret = -EIO; 3956 goto fail; 3957 } 3958 cluster_offset &= s->cluster_offset_mask; 3959 3960 ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset, out_len); 3961 qemu_co_mutex_unlock(&s->lock); 3962 if (ret < 0) { 3963 goto fail; 3964 } 3965 3966 qemu_iovec_init_buf(&hd_qiov, out_buf, out_len); 3967 3968 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED); 3969 ret = bdrv_co_pwritev(bs->file, cluster_offset, out_len, &hd_qiov, 0); 3970 if (ret < 0) { 3971 goto fail; 3972 } 3973 success: 3974 ret = 0; 3975 fail: 3976 qemu_vfree(buf); 3977 g_free(out_buf); 3978 return ret; 3979 } 3980 3981 static int coroutine_fn 3982 qcow2_co_preadv_compressed(BlockDriverState *bs, 3983 uint64_t file_cluster_offset, 3984 uint64_t offset, 3985 uint64_t bytes, 3986 QEMUIOVector *qiov) 3987 { 3988 BDRVQcow2State *s = bs->opaque; 3989 int ret = 0, csize, nb_csectors; 3990 uint64_t coffset; 3991 uint8_t *buf, *out_buf; 3992 QEMUIOVector local_qiov; 3993 int offset_in_cluster = offset_into_cluster(s, offset); 3994 3995 coffset = file_cluster_offset & s->cluster_offset_mask; 3996 nb_csectors = ((file_cluster_offset >> s->csize_shift) & s->csize_mask) + 1; 3997 csize = nb_csectors * 512 - (coffset & 511); 3998 3999 buf = g_try_malloc(csize); 4000 if (!buf) { 4001 return -ENOMEM; 4002 } 4003 qemu_iovec_init_buf(&local_qiov, buf, csize); 4004 4005 out_buf = qemu_blockalign(bs, s->cluster_size); 4006 4007 BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED); 4008 ret = bdrv_co_preadv(bs->file, coffset, csize, &local_qiov, 0); 4009 if (ret < 0) { 4010 goto fail; 4011 } 4012 4013 if (qcow2_co_decompress(bs, out_buf, s->cluster_size, buf, csize) < 0) { 4014 ret = -EIO; 4015 goto fail; 4016 } 4017 4018 qemu_iovec_from_buf(qiov, 0, out_buf + offset_in_cluster, bytes); 4019 4020 fail: 4021 qemu_vfree(out_buf); 4022 g_free(buf); 4023 4024 return ret; 4025 } 4026 4027 static int make_completely_empty(BlockDriverState *bs) 4028 { 4029 BDRVQcow2State *s = bs->opaque; 4030 Error *local_err = NULL; 4031 int ret, l1_clusters; 4032 int64_t offset; 4033 uint64_t *new_reftable = NULL; 4034 uint64_t rt_entry, l1_size2; 4035 struct { 4036 uint64_t l1_offset; 4037 uint64_t reftable_offset; 4038 uint32_t reftable_clusters; 4039 } QEMU_PACKED l1_ofs_rt_ofs_cls; 4040 4041 ret = qcow2_cache_empty(bs, s->l2_table_cache); 4042 if (ret < 0) { 4043 goto fail; 4044 } 4045 4046 ret = qcow2_cache_empty(bs, s->refcount_block_cache); 4047 if (ret < 0) { 4048 goto fail; 4049 } 4050 4051 /* Refcounts will be broken utterly */ 4052 ret = qcow2_mark_dirty(bs); 4053 if (ret < 0) { 4054 goto fail; 4055 } 4056 4057 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 4058 4059 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t)); 4060 l1_size2 = (uint64_t)s->l1_size * sizeof(uint64_t); 4061 4062 /* After this call, neither the in-memory nor the on-disk refcount 4063 * information accurately describe the actual references */ 4064 4065 ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset, 4066 l1_clusters * s->cluster_size, 0); 4067 if (ret < 0) { 4068 goto fail_broken_refcounts; 4069 } 4070 memset(s->l1_table, 0, l1_size2); 4071 4072 BLKDBG_EVENT(bs->file, BLKDBG_EMPTY_IMAGE_PREPARE); 4073 4074 /* Overwrite enough clusters at the beginning of the sectors to place 4075 * the refcount table, a refcount block and the L1 table in; this may 4076 * overwrite parts of the existing refcount and L1 table, which is not 4077 * an issue because the dirty flag is set, complete data loss is in fact 4078 * desired and partial data loss is consequently fine as well */ 4079 ret = bdrv_pwrite_zeroes(bs->file, s->cluster_size, 4080 (2 + l1_clusters) * s->cluster_size, 0); 4081 /* This call (even if it failed overall) may have overwritten on-disk 4082 * refcount structures; in that case, the in-memory refcount information 4083 * will probably differ from the on-disk information which makes the BDS 4084 * unusable */ 4085 if (ret < 0) { 4086 goto fail_broken_refcounts; 4087 } 4088 4089 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 4090 BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE); 4091 4092 /* "Create" an empty reftable (one cluster) directly after the image 4093 * header and an empty L1 table three clusters after the image header; 4094 * the cluster between those two will be used as the first refblock */ 4095 l1_ofs_rt_ofs_cls.l1_offset = cpu_to_be64(3 * s->cluster_size); 4096 l1_ofs_rt_ofs_cls.reftable_offset = cpu_to_be64(s->cluster_size); 4097 l1_ofs_rt_ofs_cls.reftable_clusters = cpu_to_be32(1); 4098 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_table_offset), 4099 &l1_ofs_rt_ofs_cls, sizeof(l1_ofs_rt_ofs_cls)); 4100 if (ret < 0) { 4101 goto fail_broken_refcounts; 4102 } 4103 4104 s->l1_table_offset = 3 * s->cluster_size; 4105 4106 new_reftable = g_try_new0(uint64_t, s->cluster_size / sizeof(uint64_t)); 4107 if (!new_reftable) { 4108 ret = -ENOMEM; 4109 goto fail_broken_refcounts; 4110 } 4111 4112 s->refcount_table_offset = s->cluster_size; 4113 s->refcount_table_size = s->cluster_size / sizeof(uint64_t); 4114 s->max_refcount_table_index = 0; 4115 4116 g_free(s->refcount_table); 4117 s->refcount_table = new_reftable; 4118 new_reftable = NULL; 4119 4120 /* Now the in-memory refcount information again corresponds to the on-disk 4121 * information (reftable is empty and no refblocks (the refblock cache is 4122 * empty)); however, this means some clusters (e.g. the image header) are 4123 * referenced, but not refcounted, but the normal qcow2 code assumes that 4124 * the in-memory information is always correct */ 4125 4126 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC); 4127 4128 /* Enter the first refblock into the reftable */ 4129 rt_entry = cpu_to_be64(2 * s->cluster_size); 4130 ret = bdrv_pwrite_sync(bs->file, s->cluster_size, 4131 &rt_entry, sizeof(rt_entry)); 4132 if (ret < 0) { 4133 goto fail_broken_refcounts; 4134 } 4135 s->refcount_table[0] = 2 * s->cluster_size; 4136 4137 s->free_cluster_index = 0; 4138 assert(3 + l1_clusters <= s->refcount_block_size); 4139 offset = qcow2_alloc_clusters(bs, 3 * s->cluster_size + l1_size2); 4140 if (offset < 0) { 4141 ret = offset; 4142 goto fail_broken_refcounts; 4143 } else if (offset > 0) { 4144 error_report("First cluster in emptied image is in use"); 4145 abort(); 4146 } 4147 4148 /* Now finally the in-memory information corresponds to the on-disk 4149 * structures and is correct */ 4150 ret = qcow2_mark_clean(bs); 4151 if (ret < 0) { 4152 goto fail; 4153 } 4154 4155 ret = bdrv_truncate(bs->file, (3 + l1_clusters) * s->cluster_size, 4156 PREALLOC_MODE_OFF, &local_err); 4157 if (ret < 0) { 4158 error_report_err(local_err); 4159 goto fail; 4160 } 4161 4162 return 0; 4163 4164 fail_broken_refcounts: 4165 /* The BDS is unusable at this point. If we wanted to make it usable, we 4166 * would have to call qcow2_refcount_close(), qcow2_refcount_init(), 4167 * qcow2_check_refcounts(), qcow2_refcount_close() and qcow2_refcount_init() 4168 * again. However, because the functions which could have caused this error 4169 * path to be taken are used by those functions as well, it's very likely 4170 * that that sequence will fail as well. Therefore, just eject the BDS. */ 4171 bs->drv = NULL; 4172 4173 fail: 4174 g_free(new_reftable); 4175 return ret; 4176 } 4177 4178 static int qcow2_make_empty(BlockDriverState *bs) 4179 { 4180 BDRVQcow2State *s = bs->opaque; 4181 uint64_t offset, end_offset; 4182 int step = QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size); 4183 int l1_clusters, ret = 0; 4184 4185 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t)); 4186 4187 if (s->qcow_version >= 3 && !s->snapshots && !s->nb_bitmaps && 4188 3 + l1_clusters <= s->refcount_block_size && 4189 s->crypt_method_header != QCOW_CRYPT_LUKS) { 4190 /* The following function only works for qcow2 v3 images (it 4191 * requires the dirty flag) and only as long as there are no 4192 * features that reserve extra clusters (such as snapshots, 4193 * LUKS header, or persistent bitmaps), because it completely 4194 * empties the image. Furthermore, the L1 table and three 4195 * additional clusters (image header, refcount table, one 4196 * refcount block) have to fit inside one refcount block. */ 4197 return make_completely_empty(bs); 4198 } 4199 4200 /* This fallback code simply discards every active cluster; this is slow, 4201 * but works in all cases */ 4202 end_offset = bs->total_sectors * BDRV_SECTOR_SIZE; 4203 for (offset = 0; offset < end_offset; offset += step) { 4204 /* As this function is generally used after committing an external 4205 * snapshot, QCOW2_DISCARD_SNAPSHOT seems appropriate. Also, the 4206 * default action for this kind of discard is to pass the discard, 4207 * which will ideally result in an actually smaller image file, as 4208 * is probably desired. */ 4209 ret = qcow2_cluster_discard(bs, offset, MIN(step, end_offset - offset), 4210 QCOW2_DISCARD_SNAPSHOT, true); 4211 if (ret < 0) { 4212 break; 4213 } 4214 } 4215 4216 return ret; 4217 } 4218 4219 static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs) 4220 { 4221 BDRVQcow2State *s = bs->opaque; 4222 int ret; 4223 4224 qemu_co_mutex_lock(&s->lock); 4225 ret = qcow2_write_caches(bs); 4226 qemu_co_mutex_unlock(&s->lock); 4227 4228 return ret; 4229 } 4230 4231 static ssize_t qcow2_measure_crypto_hdr_init_func(QCryptoBlock *block, 4232 size_t headerlen, void *opaque, Error **errp) 4233 { 4234 size_t *headerlenp = opaque; 4235 4236 /* Stash away the payload size */ 4237 *headerlenp = headerlen; 4238 return 0; 4239 } 4240 4241 static ssize_t qcow2_measure_crypto_hdr_write_func(QCryptoBlock *block, 4242 size_t offset, const uint8_t *buf, size_t buflen, 4243 void *opaque, Error **errp) 4244 { 4245 /* Discard the bytes, we're not actually writing to an image */ 4246 return buflen; 4247 } 4248 4249 /* Determine the number of bytes for the LUKS payload */ 4250 static bool qcow2_measure_luks_headerlen(QemuOpts *opts, size_t *len, 4251 Error **errp) 4252 { 4253 QDict *opts_qdict; 4254 QDict *cryptoopts_qdict; 4255 QCryptoBlockCreateOptions *cryptoopts; 4256 QCryptoBlock *crypto; 4257 4258 /* Extract "encrypt." options into a qdict */ 4259 opts_qdict = qemu_opts_to_qdict(opts, NULL); 4260 qdict_extract_subqdict(opts_qdict, &cryptoopts_qdict, "encrypt."); 4261 qobject_unref(opts_qdict); 4262 4263 /* Build QCryptoBlockCreateOptions object from qdict */ 4264 qdict_put_str(cryptoopts_qdict, "format", "luks"); 4265 cryptoopts = block_crypto_create_opts_init(cryptoopts_qdict, errp); 4266 qobject_unref(cryptoopts_qdict); 4267 if (!cryptoopts) { 4268 return false; 4269 } 4270 4271 /* Fake LUKS creation in order to determine the payload size */ 4272 crypto = qcrypto_block_create(cryptoopts, "encrypt.", 4273 qcow2_measure_crypto_hdr_init_func, 4274 qcow2_measure_crypto_hdr_write_func, 4275 len, errp); 4276 qapi_free_QCryptoBlockCreateOptions(cryptoopts); 4277 if (!crypto) { 4278 return false; 4279 } 4280 4281 qcrypto_block_free(crypto); 4282 return true; 4283 } 4284 4285 static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs, 4286 Error **errp) 4287 { 4288 Error *local_err = NULL; 4289 BlockMeasureInfo *info; 4290 uint64_t required = 0; /* bytes that contribute to required size */ 4291 uint64_t virtual_size; /* disk size as seen by guest */ 4292 uint64_t refcount_bits; 4293 uint64_t l2_tables; 4294 uint64_t luks_payload_size = 0; 4295 size_t cluster_size; 4296 int version; 4297 char *optstr; 4298 PreallocMode prealloc; 4299 bool has_backing_file; 4300 bool has_luks; 4301 4302 /* Parse image creation options */ 4303 cluster_size = qcow2_opt_get_cluster_size_del(opts, &local_err); 4304 if (local_err) { 4305 goto err; 4306 } 4307 4308 version = qcow2_opt_get_version_del(opts, &local_err); 4309 if (local_err) { 4310 goto err; 4311 } 4312 4313 refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err); 4314 if (local_err) { 4315 goto err; 4316 } 4317 4318 optstr = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); 4319 prealloc = qapi_enum_parse(&PreallocMode_lookup, optstr, 4320 PREALLOC_MODE_OFF, &local_err); 4321 g_free(optstr); 4322 if (local_err) { 4323 goto err; 4324 } 4325 4326 optstr = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); 4327 has_backing_file = !!optstr; 4328 g_free(optstr); 4329 4330 optstr = qemu_opt_get_del(opts, BLOCK_OPT_ENCRYPT_FORMAT); 4331 has_luks = optstr && strcmp(optstr, "luks") == 0; 4332 g_free(optstr); 4333 4334 if (has_luks) { 4335 size_t headerlen; 4336 4337 if (!qcow2_measure_luks_headerlen(opts, &headerlen, &local_err)) { 4338 goto err; 4339 } 4340 4341 luks_payload_size = ROUND_UP(headerlen, cluster_size); 4342 } 4343 4344 virtual_size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0); 4345 virtual_size = ROUND_UP(virtual_size, cluster_size); 4346 4347 /* Check that virtual disk size is valid */ 4348 l2_tables = DIV_ROUND_UP(virtual_size / cluster_size, 4349 cluster_size / sizeof(uint64_t)); 4350 if (l2_tables * sizeof(uint64_t) > QCOW_MAX_L1_SIZE) { 4351 error_setg(&local_err, "The image size is too large " 4352 "(try using a larger cluster size)"); 4353 goto err; 4354 } 4355 4356 /* Account for input image */ 4357 if (in_bs) { 4358 int64_t ssize = bdrv_getlength(in_bs); 4359 if (ssize < 0) { 4360 error_setg_errno(&local_err, -ssize, 4361 "Unable to get image virtual_size"); 4362 goto err; 4363 } 4364 4365 virtual_size = ROUND_UP(ssize, cluster_size); 4366 4367 if (has_backing_file) { 4368 /* We don't how much of the backing chain is shared by the input 4369 * image and the new image file. In the worst case the new image's 4370 * backing file has nothing in common with the input image. Be 4371 * conservative and assume all clusters need to be written. 4372 */ 4373 required = virtual_size; 4374 } else { 4375 int64_t offset; 4376 int64_t pnum = 0; 4377 4378 for (offset = 0; offset < ssize; offset += pnum) { 4379 int ret; 4380 4381 ret = bdrv_block_status_above(in_bs, NULL, offset, 4382 ssize - offset, &pnum, NULL, 4383 NULL); 4384 if (ret < 0) { 4385 error_setg_errno(&local_err, -ret, 4386 "Unable to get block status"); 4387 goto err; 4388 } 4389 4390 if (ret & BDRV_BLOCK_ZERO) { 4391 /* Skip zero regions (safe with no backing file) */ 4392 } else if ((ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) == 4393 (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) { 4394 /* Extend pnum to end of cluster for next iteration */ 4395 pnum = ROUND_UP(offset + pnum, cluster_size) - offset; 4396 4397 /* Count clusters we've seen */ 4398 required += offset % cluster_size + pnum; 4399 } 4400 } 4401 } 4402 } 4403 4404 /* Take into account preallocation. Nothing special is needed for 4405 * PREALLOC_MODE_METADATA since metadata is always counted. 4406 */ 4407 if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) { 4408 required = virtual_size; 4409 } 4410 4411 info = g_new(BlockMeasureInfo, 1); 4412 info->fully_allocated = 4413 qcow2_calc_prealloc_size(virtual_size, cluster_size, 4414 ctz32(refcount_bits)) + luks_payload_size; 4415 4416 /* Remove data clusters that are not required. This overestimates the 4417 * required size because metadata needed for the fully allocated file is 4418 * still counted. 4419 */ 4420 info->required = info->fully_allocated - virtual_size + required; 4421 return info; 4422 4423 err: 4424 error_propagate(errp, local_err); 4425 return NULL; 4426 } 4427 4428 static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) 4429 { 4430 BDRVQcow2State *s = bs->opaque; 4431 bdi->unallocated_blocks_are_zero = true; 4432 bdi->cluster_size = s->cluster_size; 4433 bdi->vm_state_offset = qcow2_vm_state_offset(s); 4434 return 0; 4435 } 4436 4437 static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs, 4438 Error **errp) 4439 { 4440 BDRVQcow2State *s = bs->opaque; 4441 ImageInfoSpecific *spec_info; 4442 QCryptoBlockInfo *encrypt_info = NULL; 4443 Error *local_err = NULL; 4444 4445 if (s->crypto != NULL) { 4446 encrypt_info = qcrypto_block_get_info(s->crypto, &local_err); 4447 if (local_err) { 4448 error_propagate(errp, local_err); 4449 return NULL; 4450 } 4451 } 4452 4453 spec_info = g_new(ImageInfoSpecific, 1); 4454 *spec_info = (ImageInfoSpecific){ 4455 .type = IMAGE_INFO_SPECIFIC_KIND_QCOW2, 4456 .u.qcow2.data = g_new0(ImageInfoSpecificQCow2, 1), 4457 }; 4458 if (s->qcow_version == 2) { 4459 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 4460 .compat = g_strdup("0.10"), 4461 .refcount_bits = s->refcount_bits, 4462 }; 4463 } else if (s->qcow_version == 3) { 4464 Qcow2BitmapInfoList *bitmaps; 4465 bitmaps = qcow2_get_bitmap_info_list(bs, &local_err); 4466 if (local_err) { 4467 error_propagate(errp, local_err); 4468 qapi_free_ImageInfoSpecific(spec_info); 4469 return NULL; 4470 } 4471 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 4472 .compat = g_strdup("1.1"), 4473 .lazy_refcounts = s->compatible_features & 4474 QCOW2_COMPAT_LAZY_REFCOUNTS, 4475 .has_lazy_refcounts = true, 4476 .corrupt = s->incompatible_features & 4477 QCOW2_INCOMPAT_CORRUPT, 4478 .has_corrupt = true, 4479 .refcount_bits = s->refcount_bits, 4480 .has_bitmaps = !!bitmaps, 4481 .bitmaps = bitmaps, 4482 }; 4483 } else { 4484 /* if this assertion fails, this probably means a new version was 4485 * added without having it covered here */ 4486 assert(false); 4487 } 4488 4489 if (encrypt_info) { 4490 ImageInfoSpecificQCow2Encryption *qencrypt = 4491 g_new(ImageInfoSpecificQCow2Encryption, 1); 4492 switch (encrypt_info->format) { 4493 case Q_CRYPTO_BLOCK_FORMAT_QCOW: 4494 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_AES; 4495 break; 4496 case Q_CRYPTO_BLOCK_FORMAT_LUKS: 4497 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_LUKS; 4498 qencrypt->u.luks = encrypt_info->u.luks; 4499 break; 4500 default: 4501 abort(); 4502 } 4503 /* Since we did shallow copy above, erase any pointers 4504 * in the original info */ 4505 memset(&encrypt_info->u, 0, sizeof(encrypt_info->u)); 4506 qapi_free_QCryptoBlockInfo(encrypt_info); 4507 4508 spec_info->u.qcow2.data->has_encrypt = true; 4509 spec_info->u.qcow2.data->encrypt = qencrypt; 4510 } 4511 4512 return spec_info; 4513 } 4514 4515 static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, 4516 int64_t pos) 4517 { 4518 BDRVQcow2State *s = bs->opaque; 4519 4520 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE); 4521 return bs->drv->bdrv_co_pwritev(bs, qcow2_vm_state_offset(s) + pos, 4522 qiov->size, qiov, 0); 4523 } 4524 4525 static int qcow2_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, 4526 int64_t pos) 4527 { 4528 BDRVQcow2State *s = bs->opaque; 4529 4530 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD); 4531 return bs->drv->bdrv_co_preadv(bs, qcow2_vm_state_offset(s) + pos, 4532 qiov->size, qiov, 0); 4533 } 4534 4535 /* 4536 * Downgrades an image's version. To achieve this, any incompatible features 4537 * have to be removed. 4538 */ 4539 static int qcow2_downgrade(BlockDriverState *bs, int target_version, 4540 BlockDriverAmendStatusCB *status_cb, void *cb_opaque, 4541 Error **errp) 4542 { 4543 BDRVQcow2State *s = bs->opaque; 4544 int current_version = s->qcow_version; 4545 int ret; 4546 4547 /* This is qcow2_downgrade(), not qcow2_upgrade() */ 4548 assert(target_version < current_version); 4549 4550 /* There are no other versions (now) that you can downgrade to */ 4551 assert(target_version == 2); 4552 4553 if (s->refcount_order != 4) { 4554 error_setg(errp, "compat=0.10 requires refcount_bits=16"); 4555 return -ENOTSUP; 4556 } 4557 4558 /* clear incompatible features */ 4559 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 4560 ret = qcow2_mark_clean(bs); 4561 if (ret < 0) { 4562 error_setg_errno(errp, -ret, "Failed to make the image clean"); 4563 return ret; 4564 } 4565 } 4566 4567 /* with QCOW2_INCOMPAT_CORRUPT, it is pretty much impossible to get here in 4568 * the first place; if that happens nonetheless, returning -ENOTSUP is the 4569 * best thing to do anyway */ 4570 4571 if (s->incompatible_features) { 4572 error_setg(errp, "Cannot downgrade an image with incompatible features " 4573 "%#" PRIx64 " set", s->incompatible_features); 4574 return -ENOTSUP; 4575 } 4576 4577 /* since we can ignore compatible features, we can set them to 0 as well */ 4578 s->compatible_features = 0; 4579 /* if lazy refcounts have been used, they have already been fixed through 4580 * clearing the dirty flag */ 4581 4582 /* clearing autoclear features is trivial */ 4583 s->autoclear_features = 0; 4584 4585 ret = qcow2_expand_zero_clusters(bs, status_cb, cb_opaque); 4586 if (ret < 0) { 4587 error_setg_errno(errp, -ret, "Failed to turn zero into data clusters"); 4588 return ret; 4589 } 4590 4591 s->qcow_version = target_version; 4592 ret = qcow2_update_header(bs); 4593 if (ret < 0) { 4594 s->qcow_version = current_version; 4595 error_setg_errno(errp, -ret, "Failed to update the image header"); 4596 return ret; 4597 } 4598 return 0; 4599 } 4600 4601 typedef enum Qcow2AmendOperation { 4602 /* This is the value Qcow2AmendHelperCBInfo::last_operation will be 4603 * statically initialized to so that the helper CB can discern the first 4604 * invocation from an operation change */ 4605 QCOW2_NO_OPERATION = 0, 4606 4607 QCOW2_CHANGING_REFCOUNT_ORDER, 4608 QCOW2_DOWNGRADING, 4609 } Qcow2AmendOperation; 4610 4611 typedef struct Qcow2AmendHelperCBInfo { 4612 /* The code coordinating the amend operations should only modify 4613 * these four fields; the rest will be managed by the CB */ 4614 BlockDriverAmendStatusCB *original_status_cb; 4615 void *original_cb_opaque; 4616 4617 Qcow2AmendOperation current_operation; 4618 4619 /* Total number of operations to perform (only set once) */ 4620 int total_operations; 4621 4622 /* The following fields are managed by the CB */ 4623 4624 /* Number of operations completed */ 4625 int operations_completed; 4626 4627 /* Cumulative offset of all completed operations */ 4628 int64_t offset_completed; 4629 4630 Qcow2AmendOperation last_operation; 4631 int64_t last_work_size; 4632 } Qcow2AmendHelperCBInfo; 4633 4634 static void qcow2_amend_helper_cb(BlockDriverState *bs, 4635 int64_t operation_offset, 4636 int64_t operation_work_size, void *opaque) 4637 { 4638 Qcow2AmendHelperCBInfo *info = opaque; 4639 int64_t current_work_size; 4640 int64_t projected_work_size; 4641 4642 if (info->current_operation != info->last_operation) { 4643 if (info->last_operation != QCOW2_NO_OPERATION) { 4644 info->offset_completed += info->last_work_size; 4645 info->operations_completed++; 4646 } 4647 4648 info->last_operation = info->current_operation; 4649 } 4650 4651 assert(info->total_operations > 0); 4652 assert(info->operations_completed < info->total_operations); 4653 4654 info->last_work_size = operation_work_size; 4655 4656 current_work_size = info->offset_completed + operation_work_size; 4657 4658 /* current_work_size is the total work size for (operations_completed + 1) 4659 * operations (which includes this one), so multiply it by the number of 4660 * operations not covered and divide it by the number of operations 4661 * covered to get a projection for the operations not covered */ 4662 projected_work_size = current_work_size * (info->total_operations - 4663 info->operations_completed - 1) 4664 / (info->operations_completed + 1); 4665 4666 info->original_status_cb(bs, info->offset_completed + operation_offset, 4667 current_work_size + projected_work_size, 4668 info->original_cb_opaque); 4669 } 4670 4671 static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts, 4672 BlockDriverAmendStatusCB *status_cb, 4673 void *cb_opaque, 4674 Error **errp) 4675 { 4676 BDRVQcow2State *s = bs->opaque; 4677 int old_version = s->qcow_version, new_version = old_version; 4678 uint64_t new_size = 0; 4679 const char *backing_file = NULL, *backing_format = NULL; 4680 bool lazy_refcounts = s->use_lazy_refcounts; 4681 const char *compat = NULL; 4682 uint64_t cluster_size = s->cluster_size; 4683 bool encrypt; 4684 int encformat; 4685 int refcount_bits = s->refcount_bits; 4686 int ret; 4687 QemuOptDesc *desc = opts->list->desc; 4688 Qcow2AmendHelperCBInfo helper_cb_info; 4689 4690 while (desc && desc->name) { 4691 if (!qemu_opt_find(opts, desc->name)) { 4692 /* only change explicitly defined options */ 4693 desc++; 4694 continue; 4695 } 4696 4697 if (!strcmp(desc->name, BLOCK_OPT_COMPAT_LEVEL)) { 4698 compat = qemu_opt_get(opts, BLOCK_OPT_COMPAT_LEVEL); 4699 if (!compat) { 4700 /* preserve default */ 4701 } else if (!strcmp(compat, "0.10")) { 4702 new_version = 2; 4703 } else if (!strcmp(compat, "1.1")) { 4704 new_version = 3; 4705 } else { 4706 error_setg(errp, "Unknown compatibility level %s", compat); 4707 return -EINVAL; 4708 } 4709 } else if (!strcmp(desc->name, BLOCK_OPT_PREALLOC)) { 4710 error_setg(errp, "Cannot change preallocation mode"); 4711 return -ENOTSUP; 4712 } else if (!strcmp(desc->name, BLOCK_OPT_SIZE)) { 4713 new_size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0); 4714 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FILE)) { 4715 backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE); 4716 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FMT)) { 4717 backing_format = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT); 4718 } else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT)) { 4719 encrypt = qemu_opt_get_bool(opts, BLOCK_OPT_ENCRYPT, 4720 !!s->crypto); 4721 4722 if (encrypt != !!s->crypto) { 4723 error_setg(errp, 4724 "Changing the encryption flag is not supported"); 4725 return -ENOTSUP; 4726 } 4727 } else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT_FORMAT)) { 4728 encformat = qcow2_crypt_method_from_format( 4729 qemu_opt_get(opts, BLOCK_OPT_ENCRYPT_FORMAT)); 4730 4731 if (encformat != s->crypt_method_header) { 4732 error_setg(errp, 4733 "Changing the encryption format is not supported"); 4734 return -ENOTSUP; 4735 } 4736 } else if (g_str_has_prefix(desc->name, "encrypt.")) { 4737 error_setg(errp, 4738 "Changing the encryption parameters is not supported"); 4739 return -ENOTSUP; 4740 } else if (!strcmp(desc->name, BLOCK_OPT_CLUSTER_SIZE)) { 4741 cluster_size = qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE, 4742 cluster_size); 4743 if (cluster_size != s->cluster_size) { 4744 error_setg(errp, "Changing the cluster size is not supported"); 4745 return -ENOTSUP; 4746 } 4747 } else if (!strcmp(desc->name, BLOCK_OPT_LAZY_REFCOUNTS)) { 4748 lazy_refcounts = qemu_opt_get_bool(opts, BLOCK_OPT_LAZY_REFCOUNTS, 4749 lazy_refcounts); 4750 } else if (!strcmp(desc->name, BLOCK_OPT_REFCOUNT_BITS)) { 4751 refcount_bits = qemu_opt_get_number(opts, BLOCK_OPT_REFCOUNT_BITS, 4752 refcount_bits); 4753 4754 if (refcount_bits <= 0 || refcount_bits > 64 || 4755 !is_power_of_2(refcount_bits)) 4756 { 4757 error_setg(errp, "Refcount width must be a power of two and " 4758 "may not exceed 64 bits"); 4759 return -EINVAL; 4760 } 4761 } else { 4762 /* if this point is reached, this probably means a new option was 4763 * added without having it covered here */ 4764 abort(); 4765 } 4766 4767 desc++; 4768 } 4769 4770 helper_cb_info = (Qcow2AmendHelperCBInfo){ 4771 .original_status_cb = status_cb, 4772 .original_cb_opaque = cb_opaque, 4773 .total_operations = (new_version < old_version) 4774 + (s->refcount_bits != refcount_bits) 4775 }; 4776 4777 /* Upgrade first (some features may require compat=1.1) */ 4778 if (new_version > old_version) { 4779 s->qcow_version = new_version; 4780 ret = qcow2_update_header(bs); 4781 if (ret < 0) { 4782 s->qcow_version = old_version; 4783 error_setg_errno(errp, -ret, "Failed to update the image header"); 4784 return ret; 4785 } 4786 } 4787 4788 if (s->refcount_bits != refcount_bits) { 4789 int refcount_order = ctz32(refcount_bits); 4790 4791 if (new_version < 3 && refcount_bits != 16) { 4792 error_setg(errp, "Refcount widths other than 16 bits require " 4793 "compatibility level 1.1 or above (use compat=1.1 or " 4794 "greater)"); 4795 return -EINVAL; 4796 } 4797 4798 helper_cb_info.current_operation = QCOW2_CHANGING_REFCOUNT_ORDER; 4799 ret = qcow2_change_refcount_order(bs, refcount_order, 4800 &qcow2_amend_helper_cb, 4801 &helper_cb_info, errp); 4802 if (ret < 0) { 4803 return ret; 4804 } 4805 } 4806 4807 if (backing_file || backing_format) { 4808 ret = qcow2_change_backing_file(bs, 4809 backing_file ?: s->image_backing_file, 4810 backing_format ?: s->image_backing_format); 4811 if (ret < 0) { 4812 error_setg_errno(errp, -ret, "Failed to change the backing file"); 4813 return ret; 4814 } 4815 } 4816 4817 if (s->use_lazy_refcounts != lazy_refcounts) { 4818 if (lazy_refcounts) { 4819 if (new_version < 3) { 4820 error_setg(errp, "Lazy refcounts only supported with " 4821 "compatibility level 1.1 and above (use compat=1.1 " 4822 "or greater)"); 4823 return -EINVAL; 4824 } 4825 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS; 4826 ret = qcow2_update_header(bs); 4827 if (ret < 0) { 4828 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS; 4829 error_setg_errno(errp, -ret, "Failed to update the image header"); 4830 return ret; 4831 } 4832 s->use_lazy_refcounts = true; 4833 } else { 4834 /* make image clean first */ 4835 ret = qcow2_mark_clean(bs); 4836 if (ret < 0) { 4837 error_setg_errno(errp, -ret, "Failed to make the image clean"); 4838 return ret; 4839 } 4840 /* now disallow lazy refcounts */ 4841 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS; 4842 ret = qcow2_update_header(bs); 4843 if (ret < 0) { 4844 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS; 4845 error_setg_errno(errp, -ret, "Failed to update the image header"); 4846 return ret; 4847 } 4848 s->use_lazy_refcounts = false; 4849 } 4850 } 4851 4852 if (new_size) { 4853 BlockBackend *blk = blk_new(BLK_PERM_RESIZE, BLK_PERM_ALL); 4854 ret = blk_insert_bs(blk, bs, errp); 4855 if (ret < 0) { 4856 blk_unref(blk); 4857 return ret; 4858 } 4859 4860 ret = blk_truncate(blk, new_size, PREALLOC_MODE_OFF, errp); 4861 blk_unref(blk); 4862 if (ret < 0) { 4863 return ret; 4864 } 4865 } 4866 4867 /* Downgrade last (so unsupported features can be removed before) */ 4868 if (new_version < old_version) { 4869 helper_cb_info.current_operation = QCOW2_DOWNGRADING; 4870 ret = qcow2_downgrade(bs, new_version, &qcow2_amend_helper_cb, 4871 &helper_cb_info, errp); 4872 if (ret < 0) { 4873 return ret; 4874 } 4875 } 4876 4877 return 0; 4878 } 4879 4880 /* 4881 * If offset or size are negative, respectively, they will not be included in 4882 * the BLOCK_IMAGE_CORRUPTED event emitted. 4883 * fatal will be ignored for read-only BDS; corruptions found there will always 4884 * be considered non-fatal. 4885 */ 4886 void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset, 4887 int64_t size, const char *message_format, ...) 4888 { 4889 BDRVQcow2State *s = bs->opaque; 4890 const char *node_name; 4891 char *message; 4892 va_list ap; 4893 4894 fatal = fatal && bdrv_is_writable(bs); 4895 4896 if (s->signaled_corruption && 4897 (!fatal || (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT))) 4898 { 4899 return; 4900 } 4901 4902 va_start(ap, message_format); 4903 message = g_strdup_vprintf(message_format, ap); 4904 va_end(ap); 4905 4906 if (fatal) { 4907 fprintf(stderr, "qcow2: Marking image as corrupt: %s; further " 4908 "corruption events will be suppressed\n", message); 4909 } else { 4910 fprintf(stderr, "qcow2: Image is corrupt: %s; further non-fatal " 4911 "corruption events will be suppressed\n", message); 4912 } 4913 4914 node_name = bdrv_get_node_name(bs); 4915 qapi_event_send_block_image_corrupted(bdrv_get_device_name(bs), 4916 *node_name != '\0', node_name, 4917 message, offset >= 0, offset, 4918 size >= 0, size, 4919 fatal); 4920 g_free(message); 4921 4922 if (fatal) { 4923 qcow2_mark_corrupt(bs); 4924 bs->drv = NULL; /* make BDS unusable */ 4925 } 4926 4927 s->signaled_corruption = true; 4928 } 4929 4930 static QemuOptsList qcow2_create_opts = { 4931 .name = "qcow2-create-opts", 4932 .head = QTAILQ_HEAD_INITIALIZER(qcow2_create_opts.head), 4933 .desc = { 4934 { 4935 .name = BLOCK_OPT_SIZE, 4936 .type = QEMU_OPT_SIZE, 4937 .help = "Virtual disk size" 4938 }, 4939 { 4940 .name = BLOCK_OPT_COMPAT_LEVEL, 4941 .type = QEMU_OPT_STRING, 4942 .help = "Compatibility level (0.10 or 1.1)" 4943 }, 4944 { 4945 .name = BLOCK_OPT_BACKING_FILE, 4946 .type = QEMU_OPT_STRING, 4947 .help = "File name of a base image" 4948 }, 4949 { 4950 .name = BLOCK_OPT_BACKING_FMT, 4951 .type = QEMU_OPT_STRING, 4952 .help = "Image format of the base image" 4953 }, 4954 { 4955 .name = BLOCK_OPT_ENCRYPT, 4956 .type = QEMU_OPT_BOOL, 4957 .help = "Encrypt the image with format 'aes'. (Deprecated " 4958 "in favor of " BLOCK_OPT_ENCRYPT_FORMAT "=aes)", 4959 }, 4960 { 4961 .name = BLOCK_OPT_ENCRYPT_FORMAT, 4962 .type = QEMU_OPT_STRING, 4963 .help = "Encrypt the image, format choices: 'aes', 'luks'", 4964 }, 4965 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.", 4966 "ID of secret providing qcow AES key or LUKS passphrase"), 4967 BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_ALG("encrypt."), 4968 BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_MODE("encrypt."), 4969 BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_ALG("encrypt."), 4970 BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_HASH_ALG("encrypt."), 4971 BLOCK_CRYPTO_OPT_DEF_LUKS_HASH_ALG("encrypt."), 4972 BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."), 4973 { 4974 .name = BLOCK_OPT_CLUSTER_SIZE, 4975 .type = QEMU_OPT_SIZE, 4976 .help = "qcow2 cluster size", 4977 .def_value_str = stringify(DEFAULT_CLUSTER_SIZE) 4978 }, 4979 { 4980 .name = BLOCK_OPT_PREALLOC, 4981 .type = QEMU_OPT_STRING, 4982 .help = "Preallocation mode (allowed values: off, metadata, " 4983 "falloc, full)" 4984 }, 4985 { 4986 .name = BLOCK_OPT_LAZY_REFCOUNTS, 4987 .type = QEMU_OPT_BOOL, 4988 .help = "Postpone refcount updates", 4989 .def_value_str = "off" 4990 }, 4991 { 4992 .name = BLOCK_OPT_REFCOUNT_BITS, 4993 .type = QEMU_OPT_NUMBER, 4994 .help = "Width of a reference count entry in bits", 4995 .def_value_str = "16" 4996 }, 4997 { /* end of list */ } 4998 } 4999 }; 5000 5001 static const char *const qcow2_strong_runtime_opts[] = { 5002 "encrypt." BLOCK_CRYPTO_OPT_QCOW_KEY_SECRET, 5003 5004 NULL 5005 }; 5006 5007 BlockDriver bdrv_qcow2 = { 5008 .format_name = "qcow2", 5009 .instance_size = sizeof(BDRVQcow2State), 5010 .bdrv_probe = qcow2_probe, 5011 .bdrv_open = qcow2_open, 5012 .bdrv_close = qcow2_close, 5013 .bdrv_reopen_prepare = qcow2_reopen_prepare, 5014 .bdrv_reopen_commit = qcow2_reopen_commit, 5015 .bdrv_reopen_abort = qcow2_reopen_abort, 5016 .bdrv_join_options = qcow2_join_options, 5017 .bdrv_child_perm = bdrv_format_default_perms, 5018 .bdrv_co_create_opts = qcow2_co_create_opts, 5019 .bdrv_co_create = qcow2_co_create, 5020 .bdrv_has_zero_init = bdrv_has_zero_init_1, 5021 .bdrv_co_block_status = qcow2_co_block_status, 5022 5023 .bdrv_co_preadv = qcow2_co_preadv, 5024 .bdrv_co_pwritev = qcow2_co_pwritev, 5025 .bdrv_co_flush_to_os = qcow2_co_flush_to_os, 5026 5027 .bdrv_co_pwrite_zeroes = qcow2_co_pwrite_zeroes, 5028 .bdrv_co_pdiscard = qcow2_co_pdiscard, 5029 .bdrv_co_copy_range_from = qcow2_co_copy_range_from, 5030 .bdrv_co_copy_range_to = qcow2_co_copy_range_to, 5031 .bdrv_co_truncate = qcow2_co_truncate, 5032 .bdrv_co_pwritev_compressed = qcow2_co_pwritev_compressed, 5033 .bdrv_make_empty = qcow2_make_empty, 5034 5035 .bdrv_snapshot_create = qcow2_snapshot_create, 5036 .bdrv_snapshot_goto = qcow2_snapshot_goto, 5037 .bdrv_snapshot_delete = qcow2_snapshot_delete, 5038 .bdrv_snapshot_list = qcow2_snapshot_list, 5039 .bdrv_snapshot_load_tmp = qcow2_snapshot_load_tmp, 5040 .bdrv_measure = qcow2_measure, 5041 .bdrv_get_info = qcow2_get_info, 5042 .bdrv_get_specific_info = qcow2_get_specific_info, 5043 5044 .bdrv_save_vmstate = qcow2_save_vmstate, 5045 .bdrv_load_vmstate = qcow2_load_vmstate, 5046 5047 .supports_backing = true, 5048 .bdrv_change_backing_file = qcow2_change_backing_file, 5049 5050 .bdrv_refresh_limits = qcow2_refresh_limits, 5051 .bdrv_co_invalidate_cache = qcow2_co_invalidate_cache, 5052 .bdrv_inactivate = qcow2_inactivate, 5053 5054 .create_opts = &qcow2_create_opts, 5055 .strong_runtime_opts = qcow2_strong_runtime_opts, 5056 .bdrv_co_check = qcow2_co_check, 5057 .bdrv_amend_options = qcow2_amend_options, 5058 5059 .bdrv_detach_aio_context = qcow2_detach_aio_context, 5060 .bdrv_attach_aio_context = qcow2_attach_aio_context, 5061 5062 .bdrv_reopen_bitmaps_rw = qcow2_reopen_bitmaps_rw, 5063 .bdrv_can_store_new_dirty_bitmap = qcow2_can_store_new_dirty_bitmap, 5064 .bdrv_remove_persistent_dirty_bitmap = qcow2_remove_persistent_dirty_bitmap, 5065 }; 5066 5067 static void bdrv_qcow2_init(void) 5068 { 5069 bdrv_register(&bdrv_qcow2); 5070 } 5071 5072 block_init(bdrv_qcow2_init); 5073