/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"

#include "block/qdict.h"
#include "sysemu/block-backend.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qcow2.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "trace.h"
#include "qemu/option_int.h"
#include "qemu/cutils.h"
#include "qemu/bswap.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-block-core.h"
#include "crypto.h"
#include "block/aio_task.h"

/*
  Differences with QCOW:

  - Support for multiple incremental snapshots.
  - Memory management by reference counts.
  - Clusters which have a reference count of one have the bit
    QCOW_OFLAG_COPIED to optimize write performance.
  - Size of compressed clusters is stored in sectors to reduce bit usage
    in the cluster offsets.
  - Support for storing additional data (such as the VM state) in the
    snapshots.
  - If a backing store is used, the cluster size is not constrained
    (could be backported to QCOW).
  - L2 tables have always a size of one cluster.
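
  For example, with the default 64 KiB clusters (cluster_bits = 16) an L2
  table holds 1 << (cluster_bits - 3) = 8192 eight-byte entries, so a single
  L2 table maps 8192 * 64 KiB = 512 MiB of guest data; a guest offset is
  resolved via its L1 index to an L2 table, via its L2 index to a cluster,
  plus the remaining offset within that cluster.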
*/


typedef struct {
    uint32_t magic;
    uint32_t len;
} QEMU_PACKED QCowExtension;

#define QCOW2_EXT_MAGIC_END 0
#define QCOW2_EXT_MAGIC_BACKING_FORMAT 0xE2792ACA
#define QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857
#define QCOW2_EXT_MAGIC_CRYPTO_HEADER 0x0537be77
#define QCOW2_EXT_MAGIC_BITMAPS 0x23852875
#define QCOW2_EXT_MAGIC_DATA_FILE 0x44415441

static int coroutine_fn
qcow2_co_preadv_compressed(BlockDriverState *bs,
                           uint64_t file_cluster_offset,
                           uint64_t offset,
                           uint64_t bytes,
                           QEMUIOVector *qiov,
                           size_t qiov_offset);

static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    const QCowHeader *cow_header = (const void *)buf;

    if (buf_size >= sizeof(QCowHeader) &&
        be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
        be32_to_cpu(cow_header->version) >= 2)
        return 100;
    else
        return 0;
}


static ssize_t qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset,
                                          uint8_t *buf, size_t buflen,
                                          void *opaque, Error **errp)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    ssize_t ret;

    if ((offset + buflen) > s->crypto_header.length) {
        error_setg(errp, "Request for data outside of extension header");
        return -1;
    }

    ret = bdrv_pread(bs->file,
                     s->crypto_header.offset + offset, buf, buflen);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read encryption header");
        return -1;
    }
    return ret;
}


static ssize_t qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen,
                                          void *opaque, Error **errp)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    int64_t ret;
    int64_t clusterlen;

    ret = qcow2_alloc_clusters(bs, headerlen);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Cannot allocate cluster for LUKS header size %zu",
                         headerlen);
        return -1;
    }

    s->crypto_header.length = headerlen;
    s->crypto_header.offset = ret;

    /* Zero fill remaining space in cluster so it has predictable
     * content in case of future spec changes */
    clusterlen = size_to_clusters(s, headerlen) * s->cluster_size;
    assert(qcow2_pre_write_overlap_check(bs, 0, ret, clusterlen, false) == 0);
    ret = bdrv_pwrite_zeroes(bs->file,
                             ret + headerlen,
                             clusterlen - headerlen, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not zero fill encryption header");
        return -1;
    }

    return ret;
}


static ssize_t qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset,
                                           const uint8_t *buf, size_t buflen,
                                           void *opaque, Error **errp)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    ssize_t ret;

    if ((offset + buflen) > s->crypto_header.length) {
        error_setg(errp, "Request for data outside of extension header");
        return -1;
    }

    ret = bdrv_pwrite(bs->file,
                      s->crypto_header.offset + offset, buf, buflen);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not write encryption header");
        return -1;
    }
    return ret;
}


/*
 * read qcow2 extension and fill bs
 * start reading from start_offset
 * finish reading upon magic of value 0 or when end_offset reached
 * unknown magic is skipped (future extension this version knows nothing about)
 * return 0 upon success, non-0 otherwise
 */
static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
                                 uint64_t end_offset, void **p_feature_table,
                                 int flags, bool *need_update_header,
                                 Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    QCowExtension ext;
    uint64_t offset;
    int ret;
    Qcow2BitmapHeaderExt bitmaps_ext;

    if (need_update_header != NULL) {
        *need_update_header = false;
    }

#ifdef DEBUG_EXT
    printf("qcow2_read_extensions: start=%ld end=%ld\n", start_offset, end_offset);
#endif
    offset = start_offset;
    while (offset < end_offset) {

#ifdef DEBUG_EXT
        /* Sanity check */
        if (offset > s->cluster_size)
            printf("qcow2_read_extension: suspicious offset %lu\n", offset);

        printf("attempting to read extended header in offset %lu\n", offset);
#endif

        ret = bdrv_pread(bs->file, offset, &ext, sizeof(ext));
        if (ret < 0) {
            error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: "
                             "pread fail from offset %" PRIu64, offset);
            return 1;
        }
        ext.magic = be32_to_cpu(ext.magic);
        ext.len = be32_to_cpu(ext.len);
        offset += sizeof(ext);
#ifdef DEBUG_EXT
        printf("ext.magic = 0x%x\n", ext.magic);
#endif
        if (offset > end_offset || ext.len > end_offset - offset) {
            error_setg(errp, "Header extension too large");
            return -EINVAL;
        }

        switch (ext.magic) {
        case QCOW2_EXT_MAGIC_END:
            return 0;

        case QCOW2_EXT_MAGIC_BACKING_FORMAT:
            if (ext.len >= sizeof(bs->backing_format)) {
                error_setg(errp, "ERROR: ext_backing_format: len=%" PRIu32
                           " too large (>=%zu)", ext.len,
                           sizeof(bs->backing_format));
                return 2;
            }
            ret = bdrv_pread(bs->file, offset, bs->backing_format, ext.len);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "ERROR: ext_backing_format: "
                                 "Could not read format name");
                return 3;
            }
            bs->backing_format[ext.len] = '\0';
            s->image_backing_format = g_strdup(bs->backing_format);
#ifdef DEBUG_EXT
            printf("Qcow2: Got format extension %s\n", bs->backing_format);
#endif
            break;

        case QCOW2_EXT_MAGIC_FEATURE_TABLE:
            if (p_feature_table != NULL) {
                void *feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
                ret = bdrv_pread(bs->file, offset, feature_table, ext.len);
                if (ret < 0) {
                    error_setg_errno(errp, -ret, "ERROR: ext_feature_table: "
                                     "Could not read table");
                    return ret;
                }

                *p_feature_table = feature_table;
            }
            break;

        case QCOW2_EXT_MAGIC_CRYPTO_HEADER: {
            unsigned int cflags = 0;
            if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
                error_setg(errp, "CRYPTO header extension only "
                           "expected with LUKS encryption method");
                return -EINVAL;
            }
            if (ext.len != sizeof(Qcow2CryptoHeaderExtension)) {
                error_setg(errp, "CRYPTO header extension size %u, "
                           "but expected size %zu", ext.len,
                           sizeof(Qcow2CryptoHeaderExtension));
                return -EINVAL;
            }

            ret = bdrv_pread(bs->file, offset, &s->crypto_header, ext.len);
            if (ret < 0) {
                error_setg_errno(errp, -ret,
                                 "Unable to read CRYPTO header extension");
                return ret;
            }
            s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset);
            s->crypto_header.length = be64_to_cpu(s->crypto_header.length);

            if ((s->crypto_header.offset % s->cluster_size) != 0) {
                error_setg(errp, "Encryption header offset '%" PRIu64 "' is "
                           "not a multiple of cluster size '%u'",
                           s->crypto_header.offset, s->cluster_size);
                return -EINVAL;
            }

            if (flags & BDRV_O_NO_IO) {
                cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
            }
            s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
                                           qcow2_crypto_hdr_read_func,
                                           bs, cflags, QCOW2_MAX_THREADS, errp);
            if (!s->crypto) {
                return -EINVAL;
            }
        } break;

        case QCOW2_EXT_MAGIC_BITMAPS:
            if (ext.len != sizeof(bitmaps_ext)) {
                error_setg(errp, "bitmaps_ext: "
                           "Invalid extension length");
                return -EINVAL;
            }

            if (!(s->autoclear_features & QCOW2_AUTOCLEAR_BITMAPS)) {
                if (s->qcow_version < 3) {
                    /* Let's be a bit more specific */
                    warn_report("This qcow2 v2 image contains bitmaps, but "
                                "they may have been modified by a program "
                                "without persistent bitmap support; so now "
                                "they must all be considered inconsistent");
                } else {
                    warn_report("a program lacking bitmap support "
                                "modified this file, so all bitmaps are now "
                                "considered inconsistent");
                }
                error_printf("Some clusters may be leaked, "
                             "run 'qemu-img check -r' on the image "
                             "file to fix.");
                if (need_update_header != NULL) {
                    /* Updating is needed to drop invalid bitmap extension. */
                    *need_update_header = true;
                }
                break;
            }

            ret = bdrv_pread(bs->file, offset, &bitmaps_ext, ext.len);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "bitmaps_ext: "
                                 "Could not read ext header");
                return ret;
            }

            if (bitmaps_ext.reserved32 != 0) {
                error_setg(errp, "bitmaps_ext: "
                           "Reserved field is not zero");
                return -EINVAL;
            }

            bitmaps_ext.nb_bitmaps = be32_to_cpu(bitmaps_ext.nb_bitmaps);
            bitmaps_ext.bitmap_directory_size =
                be64_to_cpu(bitmaps_ext.bitmap_directory_size);
            bitmaps_ext.bitmap_directory_offset =
                be64_to_cpu(bitmaps_ext.bitmap_directory_offset);

            if (bitmaps_ext.nb_bitmaps > QCOW2_MAX_BITMAPS) {
                error_setg(errp,
                           "bitmaps_ext: Image has %" PRIu32 " bitmaps, "
                           "exceeding the QEMU supported maximum of %d",
                           bitmaps_ext.nb_bitmaps, QCOW2_MAX_BITMAPS);
                return -EINVAL;
            }

            if (bitmaps_ext.nb_bitmaps == 0) {
                error_setg(errp, "found bitmaps extension with zero bitmaps");
                return -EINVAL;
            }

            if (bitmaps_ext.bitmap_directory_offset & (s->cluster_size - 1)) {
                error_setg(errp, "bitmaps_ext: "
                           "invalid bitmap directory offset");
                return -EINVAL;
            }

            if (bitmaps_ext.bitmap_directory_size >
                QCOW2_MAX_BITMAP_DIRECTORY_SIZE) {
                error_setg(errp, "bitmaps_ext: "
                           "bitmap directory size (%" PRIu64 ") exceeds "
                           "the maximum supported size (%d)",
                           bitmaps_ext.bitmap_directory_size,
                           QCOW2_MAX_BITMAP_DIRECTORY_SIZE);
                return -EINVAL;
            }

            s->nb_bitmaps = bitmaps_ext.nb_bitmaps;
            s->bitmap_directory_offset =
                bitmaps_ext.bitmap_directory_offset;
            s->bitmap_directory_size =
                bitmaps_ext.bitmap_directory_size;

#ifdef DEBUG_EXT
            printf("Qcow2: Got bitmaps extension: "
                   "offset=%" PRIu64 " nb_bitmaps=%" PRIu32 "\n",
                   s->bitmap_directory_offset, s->nb_bitmaps);
#endif
            break;

        case QCOW2_EXT_MAGIC_DATA_FILE:
        {
            s->image_data_file = g_malloc0(ext.len + 1);
            ret = bdrv_pread(bs->file, offset, s->image_data_file, ext.len);
            if (ret < 0) {
                error_setg_errno(errp, -ret,
                                 "ERROR: Could not read data file name");
                return ret;
            }
#ifdef DEBUG_EXT
            printf("Qcow2: Got external data file %s\n", s->image_data_file);
#endif
            break;
        }

        default:
            /* unknown magic - save it in case we need to rewrite the header */
            /* If you add a new feature, make sure to also update the fast
             * path of qcow2_make_empty() to deal with it. */
            {
                Qcow2UnknownHeaderExtension *uext;

                uext = g_malloc0(sizeof(*uext) + ext.len);
                uext->magic = ext.magic;
                uext->len = ext.len;
                QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next);

                ret = bdrv_pread(bs->file, offset, uext->data, uext->len);
                if (ret < 0) {
                    error_setg_errno(errp, -ret, "ERROR: unknown extension: "
                                     "Could not read data");
                    return ret;
                }
            }
            break;
        }

        offset += ((ext.len + 7) & ~7);
    }

    return 0;
}

static void cleanup_unknown_header_ext(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2UnknownHeaderExtension *uext, *next;

    QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) {
        QLIST_REMOVE(uext, next);
        g_free(uext);
    }
}

static void report_unsupported_feature(Error **errp, Qcow2Feature *table,
                                       uint64_t mask)
{
    char *features = g_strdup("");
    char *old;

    while (table && table->name[0] != '\0') {
        if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) {
            if (mask & (1ULL << table->bit)) {
                old = features;
                features = g_strdup_printf("%s%s%.46s", old, *old ? ", " : "",
                                           table->name);
                g_free(old);
                mask &= ~(1ULL << table->bit);
            }
        }
        table++;
    }

    if (mask) {
        old = features;
        features = g_strdup_printf("%s%sUnknown incompatible feature: %" PRIx64,
                                   old, *old ? ", " : "", mask);
        g_free(old);
    }

    error_setg(errp, "Unsupported qcow2 feature(s): %s", features);
    g_free(features);
}

/*
 * Sets the dirty bit and flushes afterwards if necessary.
 *
 * The incompatible_features bit is only set if the image file header was
 * updated successfully.  Therefore it is not required to check the return
 * value of this function.
 */
int qcow2_mark_dirty(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t val;
    int ret;

    assert(s->qcow_version >= 3);

    if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
        return 0; /* already dirty */
    }

    val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY);
    ret = bdrv_pwrite(bs->file, offsetof(QCowHeader, incompatible_features),
                      &val, sizeof(val));
    if (ret < 0) {
        return ret;
    }
    ret = bdrv_flush(bs->file->bs);
    if (ret < 0) {
        return ret;
    }

    /* Only treat image as dirty if the header was updated successfully */
    s->incompatible_features |= QCOW2_INCOMPAT_DIRTY;
    return 0;
}

/*
 * Clears the dirty bit and flushes before if necessary.  Only call this
 * function when there are no pending requests, it does not guard against
 * concurrent requests dirtying the image.
 */
static int qcow2_mark_clean(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;

    if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
        int ret;

        s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY;

        ret = qcow2_flush_caches(bs);
        if (ret < 0) {
            return ret;
        }

        return qcow2_update_header(bs);
    }
    return 0;
}

/*
 * Marks the image as corrupt.
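 *
 * Once the corrupt bit is set, the image can no longer be opened read/write
 * (except for repair; see the QCOW2_INCOMPAT_CORRUPT handling in
 * qcow2_do_open()) until the bit is cleared again by qcow2_mark_consistent().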
 */
int qcow2_mark_corrupt(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;

    s->incompatible_features |= QCOW2_INCOMPAT_CORRUPT;
    return qcow2_update_header(bs);
}

/*
 * Marks the image as consistent, i.e., unsets the corrupt bit, and flushes
 * before if necessary.
 */
int qcow2_mark_consistent(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;

    if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
        int ret = qcow2_flush_caches(bs);
        if (ret < 0) {
            return ret;
        }

        s->incompatible_features &= ~QCOW2_INCOMPAT_CORRUPT;
        return qcow2_update_header(bs);
    }
    return 0;
}

static int coroutine_fn qcow2_co_check_locked(BlockDriverState *bs,
                                              BdrvCheckResult *result,
                                              BdrvCheckMode fix)
{
    int ret = qcow2_check_refcounts(bs, result, fix);
    if (ret < 0) {
        return ret;
    }

    if (fix && result->check_errors == 0 && result->corruptions == 0) {
        ret = qcow2_mark_clean(bs);
        if (ret < 0) {
            return ret;
        }
        return qcow2_mark_consistent(bs);
    }
    return ret;
}

static int coroutine_fn qcow2_co_check(BlockDriverState *bs,
                                       BdrvCheckResult *result,
                                       BdrvCheckMode fix)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_co_check_locked(bs, result, fix);
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}

int qcow2_validate_table(BlockDriverState *bs, uint64_t offset,
                         uint64_t entries, size_t entry_len,
                         int64_t max_size_bytes, const char *table_name,
                         Error **errp)
{
    BDRVQcow2State *s = bs->opaque;

    if (entries > max_size_bytes / entry_len) {
        error_setg(errp, "%s too large", table_name);
        return -EFBIG;
    }

    /* Use signed INT64_MAX as the maximum even for uint64_t header fields,
     * because values will be passed to qemu functions taking int64_t.
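     * (i.e. the check below accepts the table only if
     * offset + entries * entry_len <= INT64_MAX)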
     */
    if ((INT64_MAX - entries * entry_len < offset) ||
        (offset_into_cluster(s, offset) != 0)) {
        error_setg(errp, "%s offset invalid", table_name);
        return -EINVAL;
    }

    return 0;
}

static const char *const mutable_opts[] = {
    QCOW2_OPT_LAZY_REFCOUNTS,
    QCOW2_OPT_DISCARD_REQUEST,
    QCOW2_OPT_DISCARD_SNAPSHOT,
    QCOW2_OPT_DISCARD_OTHER,
    QCOW2_OPT_OVERLAP,
    QCOW2_OPT_OVERLAP_TEMPLATE,
    QCOW2_OPT_OVERLAP_MAIN_HEADER,
    QCOW2_OPT_OVERLAP_ACTIVE_L1,
    QCOW2_OPT_OVERLAP_ACTIVE_L2,
    QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
    QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
    QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
    QCOW2_OPT_OVERLAP_INACTIVE_L1,
    QCOW2_OPT_OVERLAP_INACTIVE_L2,
    QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
    QCOW2_OPT_CACHE_SIZE,
    QCOW2_OPT_L2_CACHE_SIZE,
    QCOW2_OPT_L2_CACHE_ENTRY_SIZE,
    QCOW2_OPT_REFCOUNT_CACHE_SIZE,
    QCOW2_OPT_CACHE_CLEAN_INTERVAL,
    NULL
};

static QemuOptsList qcow2_runtime_opts = {
    .name = "qcow2",
    .head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head),
    .desc = {
        {
            .name = QCOW2_OPT_LAZY_REFCOUNTS,
            .type = QEMU_OPT_BOOL,
            .help = "Postpone refcount updates",
        },
        {
            .name = QCOW2_OPT_DISCARD_REQUEST,
            .type = QEMU_OPT_BOOL,
            .help = "Pass guest discard requests to the layer below",
        },
        {
            .name = QCOW2_OPT_DISCARD_SNAPSHOT,
            .type = QEMU_OPT_BOOL,
            .help = "Generate discard requests when snapshot related space "
                    "is freed",
        },
        {
            .name = QCOW2_OPT_DISCARD_OTHER,
            .type = QEMU_OPT_BOOL,
            .help = "Generate discard requests when other clusters are freed",
        },
        {
            .name = QCOW2_OPT_OVERLAP,
            .type = QEMU_OPT_STRING,
            .help = "Selects which overlap checks to perform from a range of "
                    "templates (none, constant, cached, all)",
        },
        {
            .name = QCOW2_OPT_OVERLAP_TEMPLATE,
            .type = QEMU_OPT_STRING,
            .help = "Selects which overlap checks to perform from a range of "
                    "templates (none, constant, cached, all)",
        },
        {
            .name = QCOW2_OPT_OVERLAP_MAIN_HEADER,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the main qcow2 header",
        },
        {
            .name = QCOW2_OPT_OVERLAP_ACTIVE_L1,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the active L1 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_ACTIVE_L2,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into an active L2 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the refcount table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into a refcount block",
        },
        {
            .name = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the snapshot table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_INACTIVE_L1,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into an inactive L1 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_INACTIVE_L2,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into an inactive L2 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the bitmap directory",
        },
        {
            .name = QCOW2_OPT_CACHE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Maximum combined metadata (L2 tables and refcount blocks) "
                    "cache size",
        },
        {
            .name = QCOW2_OPT_L2_CACHE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Maximum L2 table cache size",
        },
        {
            .name = QCOW2_OPT_L2_CACHE_ENTRY_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Size of each entry in the L2 cache",
        },
        {
            .name = QCOW2_OPT_REFCOUNT_CACHE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Maximum refcount block cache size",
        },
        {
            .name = QCOW2_OPT_CACHE_CLEAN_INTERVAL,
            .type = QEMU_OPT_NUMBER,
            .help = "Clean unused cache entries after this time (in seconds)",
        },
        BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.",
            "ID of secret providing qcow2 AES key or LUKS passphrase"),
        { /* end of list */ }
    },
};

static const char *overlap_bool_option_names[QCOW2_OL_MAX_BITNR] = {
    [QCOW2_OL_MAIN_HEADER_BITNR]      = QCOW2_OPT_OVERLAP_MAIN_HEADER,
    [QCOW2_OL_ACTIVE_L1_BITNR]        = QCOW2_OPT_OVERLAP_ACTIVE_L1,
    [QCOW2_OL_ACTIVE_L2_BITNR]        = QCOW2_OPT_OVERLAP_ACTIVE_L2,
    [QCOW2_OL_REFCOUNT_TABLE_BITNR]   = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
    [QCOW2_OL_REFCOUNT_BLOCK_BITNR]   = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
    [QCOW2_OL_SNAPSHOT_TABLE_BITNR]   = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
    [QCOW2_OL_INACTIVE_L1_BITNR]      = QCOW2_OPT_OVERLAP_INACTIVE_L1,
    [QCOW2_OL_INACTIVE_L2_BITNR]      = QCOW2_OPT_OVERLAP_INACTIVE_L2,
    [QCOW2_OL_BITMAP_DIRECTORY_BITNR] = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
};

static void cache_clean_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    qcow2_cache_clean_unused(s->l2_table_cache);
    qcow2_cache_clean_unused(s->refcount_block_cache);
    timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
              (int64_t) s->cache_clean_interval * 1000);
}

static void cache_clean_timer_init(BlockDriverState *bs, AioContext *context)
{
    BDRVQcow2State *s = bs->opaque;
    if (s->cache_clean_interval > 0) {
        s->cache_clean_timer = aio_timer_new(context, QEMU_CLOCK_VIRTUAL,
                                             SCALE_MS, cache_clean_timer_cb,
                                             bs);
        timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                  (int64_t) s->cache_clean_interval * 1000);
    }
}

static void cache_clean_timer_del(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    if (s->cache_clean_timer) {
        timer_del(s->cache_clean_timer);
        timer_free(s->cache_clean_timer);
        s->cache_clean_timer = NULL;
    }
}

static void qcow2_detach_aio_context(BlockDriverState *bs)
{
    cache_clean_timer_del(bs);
}

static void qcow2_attach_aio_context(BlockDriverState *bs,
                                     AioContext *new_context)
{
    cache_clean_timer_init(bs, new_context);
}

static void read_cache_sizes(BlockDriverState *bs, QemuOpts *opts,
                             uint64_t *l2_cache_size,
                             uint64_t *l2_cache_entry_size,
                             uint64_t *refcount_cache_size, Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t combined_cache_size, l2_cache_max_setting;
    bool l2_cache_size_set, refcount_cache_size_set, combined_cache_size_set;
    bool l2_cache_entry_size_set;
    int min_refcount_cache = MIN_REFCOUNT_CACHE_SIZE * s->cluster_size;
    uint64_t virtual_disk_size = bs->total_sectors * BDRV_SECTOR_SIZE;
    uint64_t max_l2_entries = DIV_ROUND_UP(virtual_disk_size, s->cluster_size);
    /* An L2 table is always one cluster in size so the max cache size
     * should be a multiple of the cluster size.
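     * (for example, with 64 KiB clusters a 1 TiB virtual disk needs
     * 16M L2 entries * 8 bytes = 128 MiB of cache to be fully covered)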
     */
    uint64_t max_l2_cache = ROUND_UP(max_l2_entries * sizeof(uint64_t),
                                     s->cluster_size);

    combined_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_CACHE_SIZE);
    l2_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_SIZE);
    refcount_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_REFCOUNT_CACHE_SIZE);
    l2_cache_entry_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE);

    combined_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_CACHE_SIZE, 0);
    l2_cache_max_setting = qemu_opt_get_size(opts, QCOW2_OPT_L2_CACHE_SIZE,
                                             DEFAULT_L2_CACHE_MAX_SIZE);
    *refcount_cache_size = qemu_opt_get_size(opts,
                                             QCOW2_OPT_REFCOUNT_CACHE_SIZE, 0);

    *l2_cache_entry_size = qemu_opt_get_size(
        opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE, s->cluster_size);

    *l2_cache_size = MIN(max_l2_cache, l2_cache_max_setting);

    if (combined_cache_size_set) {
        if (l2_cache_size_set && refcount_cache_size_set) {
            error_setg(errp, QCOW2_OPT_CACHE_SIZE ", " QCOW2_OPT_L2_CACHE_SIZE
                       " and " QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not be set "
                       "at the same time");
            return;
        } else if (l2_cache_size_set &&
                   (l2_cache_max_setting > combined_cache_size)) {
            error_setg(errp, QCOW2_OPT_L2_CACHE_SIZE " may not exceed "
                       QCOW2_OPT_CACHE_SIZE);
            return;
        } else if (*refcount_cache_size > combined_cache_size) {
            error_setg(errp, QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not exceed "
                       QCOW2_OPT_CACHE_SIZE);
            return;
        }

        if (l2_cache_size_set) {
            *refcount_cache_size = combined_cache_size - *l2_cache_size;
        } else if (refcount_cache_size_set) {
            *l2_cache_size = combined_cache_size - *refcount_cache_size;
        } else {
            /* Assign as much memory as possible to the L2 cache, and
             * use the remainder for the refcount cache */
            if (combined_cache_size >= max_l2_cache + min_refcount_cache) {
                *l2_cache_size = max_l2_cache;
                *refcount_cache_size = combined_cache_size - *l2_cache_size;
            } else {
                *refcount_cache_size =
                    MIN(combined_cache_size, min_refcount_cache);
                *l2_cache_size = combined_cache_size - *refcount_cache_size;
            }
        }
    }

    /*
     * If the L2 cache is not enough to cover the whole disk then
     * default to 4KB entries. Smaller entries reduce the cost of
     * loads and evictions and increase I/O performance.
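     * (with 64 KiB clusters, a 4 KiB cache entry holds 512 L2 entries
     * and therefore maps 32 MiB of guest data)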
     */
    if (*l2_cache_size < max_l2_cache && !l2_cache_entry_size_set) {
        *l2_cache_entry_size = MIN(s->cluster_size, 4096);
    }

    /* l2_cache_size and refcount_cache_size are ensured to have at least
     * their minimum values in qcow2_update_options_prepare() */

    if (*l2_cache_entry_size < (1 << MIN_CLUSTER_BITS) ||
        *l2_cache_entry_size > s->cluster_size ||
        !is_power_of_2(*l2_cache_entry_size)) {
        error_setg(errp, "L2 cache entry size must be a power of two "
                   "between %d and the cluster size (%d)",
                   1 << MIN_CLUSTER_BITS, s->cluster_size);
        return;
    }
}

typedef struct Qcow2ReopenState {
    Qcow2Cache *l2_table_cache;
    Qcow2Cache *refcount_block_cache;
    int l2_slice_size; /* Number of entries in a slice of the L2 table */
    bool use_lazy_refcounts;
    int overlap_check;
    bool discard_passthrough[QCOW2_DISCARD_MAX];
    uint64_t cache_clean_interval;
    QCryptoBlockOpenOptions *crypto_opts; /* Disk encryption runtime options */
} Qcow2ReopenState;

static int qcow2_update_options_prepare(BlockDriverState *bs,
                                        Qcow2ReopenState *r,
                                        QDict *options, int flags,
                                        Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    QemuOpts *opts = NULL;
    const char *opt_overlap_check, *opt_overlap_check_template;
    int overlap_check_template = 0;
    uint64_t l2_cache_size, l2_cache_entry_size, refcount_cache_size;
    int i;
    const char *encryptfmt;
    QDict *encryptopts = NULL;
    Error *local_err = NULL;
    int ret;

    qdict_extract_subqdict(options, &encryptopts, "encrypt.");
    encryptfmt = qdict_get_try_str(encryptopts, "format");

    opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    /* get L2 table/refcount block cache size from command line options */
    read_cache_sizes(bs, opts, &l2_cache_size, &l2_cache_entry_size,
                     &refcount_cache_size, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    l2_cache_size /= l2_cache_entry_size;
    if (l2_cache_size < MIN_L2_CACHE_SIZE) {
        l2_cache_size = MIN_L2_CACHE_SIZE;
    }
    if (l2_cache_size > INT_MAX) {
        error_setg(errp, "L2 cache size too big");
        ret = -EINVAL;
        goto fail;
    }

    refcount_cache_size /= s->cluster_size;
    if (refcount_cache_size < MIN_REFCOUNT_CACHE_SIZE) {
        refcount_cache_size = MIN_REFCOUNT_CACHE_SIZE;
    }
    if (refcount_cache_size > INT_MAX) {
        error_setg(errp, "Refcount cache size too big");
        ret = -EINVAL;
        goto fail;
    }

    /* alloc new L2 table/refcount block cache, flush old one */
    if (s->l2_table_cache) {
        ret = qcow2_cache_flush(bs, s->l2_table_cache);
        if (ret) {
            error_setg_errno(errp, -ret, "Failed to flush the L2 table cache");
            goto fail;
        }
    }

    if (s->refcount_block_cache) {
        ret = qcow2_cache_flush(bs, s->refcount_block_cache);
        if (ret) {
            error_setg_errno(errp, -ret,
                             "Failed to flush the refcount block cache");
            goto fail;
        }
    }

    r->l2_slice_size = l2_cache_entry_size / sizeof(uint64_t);
    r->l2_table_cache = qcow2_cache_create(bs, l2_cache_size,
                                           l2_cache_entry_size);
    r->refcount_block_cache = qcow2_cache_create(bs, refcount_cache_size,
                                                 s->cluster_size);
    if (r->l2_table_cache == NULL || r->refcount_block_cache == NULL) {
        error_setg(errp, "Could not allocate metadata caches");
        ret = -ENOMEM;
        goto fail;
    }

    /* New interval for cache cleanup timer */
    r->cache_clean_interval =
        qemu_opt_get_number(opts, QCOW2_OPT_CACHE_CLEAN_INTERVAL,
                            DEFAULT_CACHE_CLEAN_INTERVAL);
#ifndef CONFIG_LINUX
    if (r->cache_clean_interval != 0) {
        error_setg(errp, QCOW2_OPT_CACHE_CLEAN_INTERVAL
                   " not supported on this host");
        ret = -EINVAL;
        goto fail;
    }
#endif
    if (r->cache_clean_interval > UINT_MAX) {
        error_setg(errp, "Cache clean interval too big");
        ret = -EINVAL;
        goto fail;
    }

    /* lazy-refcounts; flush if going from enabled to disabled */
    r->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS,
        (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS));
    if (r->use_lazy_refcounts && s->qcow_version < 3) {
        error_setg(errp, "Lazy refcounts require a qcow2 image with at least "
                   "qemu 1.1 compatibility level");
        ret = -EINVAL;
        goto fail;
    }

    if (s->use_lazy_refcounts && !r->use_lazy_refcounts) {
        ret = qcow2_mark_clean(bs);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Failed to disable lazy refcounts");
            goto fail;
        }
    }

    /* Overlap check options */
    opt_overlap_check = qemu_opt_get(opts, QCOW2_OPT_OVERLAP);
    opt_overlap_check_template = qemu_opt_get(opts, QCOW2_OPT_OVERLAP_TEMPLATE);
    if (opt_overlap_check_template && opt_overlap_check &&
        strcmp(opt_overlap_check_template, opt_overlap_check))
    {
        error_setg(errp, "Conflicting values for qcow2 options '"
                   QCOW2_OPT_OVERLAP "' ('%s') and '" QCOW2_OPT_OVERLAP_TEMPLATE
                   "' ('%s')", opt_overlap_check, opt_overlap_check_template);
        ret = -EINVAL;
        goto fail;
    }
    if (!opt_overlap_check) {
        opt_overlap_check = opt_overlap_check_template ?: "cached";
    }

    if (!strcmp(opt_overlap_check, "none")) {
        overlap_check_template = 0;
    } else if (!strcmp(opt_overlap_check, "constant")) {
        overlap_check_template = QCOW2_OL_CONSTANT;
    } else if (!strcmp(opt_overlap_check, "cached")) {
        overlap_check_template = QCOW2_OL_CACHED;
    } else if (!strcmp(opt_overlap_check, "all")) {
        overlap_check_template = QCOW2_OL_ALL;
    } else {
        error_setg(errp, "Unsupported value '%s' for qcow2 option "
                   "'overlap-check'. Allowed are any of the following: "
                   "none, constant, cached, all", opt_overlap_check);
        ret = -EINVAL;
        goto fail;
    }

    r->overlap_check = 0;
    for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) {
        /* overlap-check defines a template bitmask, but every flag may be
         * overwritten through the associated boolean option */
        r->overlap_check |=
            qemu_opt_get_bool(opts, overlap_bool_option_names[i],
                              overlap_check_template & (1 << i)) << i;
    }

    r->discard_passthrough[QCOW2_DISCARD_NEVER] = false;
    r->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true;
    r->discard_passthrough[QCOW2_DISCARD_REQUEST] =
        qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST,
                          flags & BDRV_O_UNMAP);
    r->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] =
        qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true);
    r->discard_passthrough[QCOW2_DISCARD_OTHER] =
        qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false);

    switch (s->crypt_method_header) {
    case QCOW_CRYPT_NONE:
        if (encryptfmt) {
            error_setg(errp, "No encryption in image header, but options "
                       "specified format '%s'", encryptfmt);
            ret = -EINVAL;
            goto fail;
        }
        break;

    case QCOW_CRYPT_AES:
        if (encryptfmt && !g_str_equal(encryptfmt, "aes")) {
            error_setg(errp,
                       "Header reported 'aes' encryption format but "
                       "options specify '%s'", encryptfmt);
            ret = -EINVAL;
            goto fail;
        }
        qdict_put_str(encryptopts, "format", "qcow");
        r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp);
        break;

    case QCOW_CRYPT_LUKS:
        if (encryptfmt && !g_str_equal(encryptfmt, "luks")) {
            error_setg(errp,
                       "Header reported 'luks' encryption format but "
                       "options specify '%s'", encryptfmt);
            ret = -EINVAL;
            goto fail;
        }
        qdict_put_str(encryptopts, "format", "luks");
        r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp);
        break;

    default:
        error_setg(errp, "Unsupported encryption method %d",
                   s->crypt_method_header);
        break;
    }
    if (s->crypt_method_header != QCOW_CRYPT_NONE && !r->crypto_opts) {
        ret = -EINVAL;
        goto fail;
    }

    ret = 0;
fail:
    qobject_unref(encryptopts);
    qemu_opts_del(opts);
    opts = NULL;
    return ret;
}

static void qcow2_update_options_commit(BlockDriverState *bs,
                                        Qcow2ReopenState *r)
{
    BDRVQcow2State *s = bs->opaque;
    int i;

    if (s->l2_table_cache) {
        qcow2_cache_destroy(s->l2_table_cache);
    }
    if (s->refcount_block_cache) {
        qcow2_cache_destroy(s->refcount_block_cache);
    }
    s->l2_table_cache = r->l2_table_cache;
    s->refcount_block_cache = r->refcount_block_cache;
    s->l2_slice_size = r->l2_slice_size;

    s->overlap_check = r->overlap_check;
    s->use_lazy_refcounts = r->use_lazy_refcounts;

    for (i = 0; i < QCOW2_DISCARD_MAX; i++) {
        s->discard_passthrough[i] = r->discard_passthrough[i];
    }

    if (s->cache_clean_interval != r->cache_clean_interval) {
        cache_clean_timer_del(bs);
        s->cache_clean_interval = r->cache_clean_interval;
        cache_clean_timer_init(bs, bdrv_get_aio_context(bs));
    }

    qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
    s->crypto_opts = r->crypto_opts;
}

static void qcow2_update_options_abort(BlockDriverState *bs,
                                       Qcow2ReopenState *r)
{
    if (r->l2_table_cache) {
        qcow2_cache_destroy(r->l2_table_cache);
    }
    if (r->refcount_block_cache) {
        qcow2_cache_destroy(r->refcount_block_cache);
    }
    qapi_free_QCryptoBlockOpenOptions(r->crypto_opts);
}

static int qcow2_update_options(BlockDriverState *bs, QDict *options,
                                int flags, Error **errp)
{
    Qcow2ReopenState r = {};
    int ret;

    ret = qcow2_update_options_prepare(bs, &r, options, flags, errp);
    if (ret >= 0) {
        qcow2_update_options_commit(bs, &r);
    } else {
        qcow2_update_options_abort(bs, &r);
    }

    return ret;
}

/* Called with s->lock held.  */
static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options,
                                      int flags, Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int len, i;
    int ret = 0;
    QCowHeader header;
    Error *local_err = NULL;
    uint64_t ext_end;
    uint64_t l1_vm_state_index;
    bool update_header = false;

    ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read qcow2 header");
        goto fail;
    }
    header.magic = be32_to_cpu(header.magic);
    header.version = be32_to_cpu(header.version);
    header.backing_file_offset = be64_to_cpu(header.backing_file_offset);
    header.backing_file_size = be32_to_cpu(header.backing_file_size);
    header.size = be64_to_cpu(header.size);
    header.cluster_bits = be32_to_cpu(header.cluster_bits);
    header.crypt_method = be32_to_cpu(header.crypt_method);
    header.l1_table_offset = be64_to_cpu(header.l1_table_offset);
    header.l1_size = be32_to_cpu(header.l1_size);
    header.refcount_table_offset = be64_to_cpu(header.refcount_table_offset);
    header.refcount_table_clusters =
        be32_to_cpu(header.refcount_table_clusters);
    header.snapshots_offset = be64_to_cpu(header.snapshots_offset);
    header.nb_snapshots = be32_to_cpu(header.nb_snapshots);

    if (header.magic != QCOW_MAGIC) {
        error_setg(errp, "Image is not in qcow2 format");
        ret = -EINVAL;
        goto fail;
    }
    if (header.version < 2 || header.version > 3) {
        error_setg(errp, "Unsupported qcow2 version %" PRIu32, header.version);
        ret = -ENOTSUP;
        goto fail;
    }

    s->qcow_version = header.version;

    /* Initialise cluster size */
    if (header.cluster_bits < MIN_CLUSTER_BITS ||
        header.cluster_bits > MAX_CLUSTER_BITS) {
        error_setg(errp, "Unsupported cluster size: 2^%" PRIu32,
                   header.cluster_bits);
        ret = -EINVAL;
        goto fail;
    }

    s->cluster_bits = header.cluster_bits;
    s->cluster_size = 1 << s->cluster_bits;

    /* Initialise version 3 header fields */
    if (header.version == 2) {
        header.incompatible_features = 0;
        header.compatible_features = 0;
        header.autoclear_features = 0;
        header.refcount_order = 4;
        header.header_length = 72;
    } else {
        header.incompatible_features =
            be64_to_cpu(header.incompatible_features);
        header.compatible_features = be64_to_cpu(header.compatible_features);
        header.autoclear_features = be64_to_cpu(header.autoclear_features);
        header.refcount_order = be32_to_cpu(header.refcount_order);
        header.header_length = be32_to_cpu(header.header_length);

        if (header.header_length < 104) {
            error_setg(errp, "qcow2 header too short");
            ret = -EINVAL;
            goto fail;
        }
    }

    if (header.header_length > s->cluster_size) {
        error_setg(errp, "qcow2 header exceeds cluster size");
        ret = -EINVAL;
        goto fail;
    }

    if (header.header_length > sizeof(header)) {
        s->unknown_header_fields_size = header.header_length - sizeof(header);
        s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);
        ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields,
                         s->unknown_header_fields_size);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not read unknown qcow2 header "
                             "fields");
            goto fail;
        }
    }

    if (header.backing_file_offset > s->cluster_size) {
        error_setg(errp, "Invalid backing file offset");
        ret = -EINVAL;
        goto fail;
    }

    if (header.backing_file_offset) {
        ext_end = header.backing_file_offset;
    } else {
        ext_end = 1 << header.cluster_bits;
    }

    /* Handle feature bits */
    s->incompatible_features = header.incompatible_features;
    s->compatible_features = header.compatible_features;
    s->autoclear_features = header.autoclear_features;

    if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) {
        void *feature_table = NULL;
        qcow2_read_extensions(bs, header.header_length, ext_end,
                              &feature_table, flags, NULL, NULL);
        report_unsupported_feature(errp, feature_table,
                                   s->incompatible_features &
                                   ~QCOW2_INCOMPAT_MASK);
        ret = -ENOTSUP;
        g_free(feature_table);
        goto fail;
    }

    if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
        /* Corrupt images may not be written to unless they are being repaired
         */
        if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) {
            error_setg(errp, "qcow2: Image is corrupt; cannot be opened "
                       "read/write");
            ret = -EACCES;
            goto fail;
        }
    }

    /* Check support for various header values */
    if (header.refcount_order > 6) {
        error_setg(errp, "Reference count entry width too large; may not "
                   "exceed 64 bits");
        ret = -EINVAL;
        goto fail;
    }
    s->refcount_order = header.refcount_order;
    s->refcount_bits = 1 << s->refcount_order;
    s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1);
    s->refcount_max += s->refcount_max - 1;

    s->crypt_method_header = header.crypt_method;
    if (s->crypt_method_header) {
        if (bdrv_uses_whitelist() &&
            s->crypt_method_header == QCOW_CRYPT_AES) {
            error_setg(errp,
                       "Use of AES-CBC encrypted qcow2 images is no longer "
                       "supported in system emulators");
            error_append_hint(errp,
                              "You can use 'qemu-img convert' to convert your "
                              "image to an alternative supported format, such "
                              "as unencrypted qcow2, or raw with the LUKS "
                              "format instead.\n");
            ret = -ENOSYS;
            goto fail;
        }

        if (s->crypt_method_header == QCOW_CRYPT_AES) {
            s->crypt_physical_offset = false;
        } else {
            /* Assuming LUKS and any future crypt methods we
             * add will all use physical offsets, due to the
             * fact that the alternative is insecure...
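             * (the per-sector IV is derived from an offset; using the
             * physical offset in the image file rather than the guest
             * visible offset avoids reusing the same IV when the same
             * guest offset ends up stored in more than one physical
             * cluster, e.g. across internal snapshots)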
             */
            s->crypt_physical_offset = true;
        }

        bs->encrypted = true;
    }

    s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
    s->l2_size = 1 << s->l2_bits;
    /* 2^(s->refcount_order - 3) is the refcount width in bytes */
    s->refcount_block_bits = s->cluster_bits - (s->refcount_order - 3);
    s->refcount_block_size = 1 << s->refcount_block_bits;
    bs->total_sectors = header.size / BDRV_SECTOR_SIZE;
    s->csize_shift = (62 - (s->cluster_bits - 8));
    s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
    s->cluster_offset_mask = (1LL << s->csize_shift) - 1;

    s->refcount_table_offset = header.refcount_table_offset;
    s->refcount_table_size =
        header.refcount_table_clusters << (s->cluster_bits - 3);

    if (header.refcount_table_clusters == 0 && !(flags & BDRV_O_CHECK)) {
        error_setg(errp, "Image does not contain a reference count table");
        ret = -EINVAL;
        goto fail;
    }

    ret = qcow2_validate_table(bs, s->refcount_table_offset,
                               header.refcount_table_clusters,
                               s->cluster_size, QCOW_MAX_REFTABLE_SIZE,
                               "Reference count table", errp);
    if (ret < 0) {
        goto fail;
    }

    /* The total size in bytes of the snapshot table is checked in
     * qcow2_read_snapshots() because the size of each snapshot is
     * variable and we don't know it yet.
     * Here we only check the offset and number of snapshots. */
    ret = qcow2_validate_table(bs, header.snapshots_offset,
                               header.nb_snapshots,
                               sizeof(QCowSnapshotHeader),
                               sizeof(QCowSnapshotHeader) * QCOW_MAX_SNAPSHOTS,
                               "Snapshot table", errp);
    if (ret < 0) {
        goto fail;
    }

    /* read the level 1 table */
    ret = qcow2_validate_table(bs, header.l1_table_offset,
                               header.l1_size, sizeof(uint64_t),
                               QCOW_MAX_L1_SIZE, "Active L1 table", errp);
    if (ret < 0) {
        goto fail;
    }
    s->l1_size = header.l1_size;
    s->l1_table_offset = header.l1_table_offset;

    l1_vm_state_index = size_to_l1(s, header.size);
    if (l1_vm_state_index > INT_MAX) {
        error_setg(errp, "Image is too big");
        ret = -EFBIG;
        goto fail;
    }
    s->l1_vm_state_index = l1_vm_state_index;

    /* the L1 table must contain at least enough entries to put
       header.size bytes */
    if (s->l1_size < s->l1_vm_state_index) {
        error_setg(errp, "L1 table is too small");
        ret = -EINVAL;
        goto fail;
    }

    if (s->l1_size > 0) {
        s->l1_table = qemu_try_blockalign(bs->file->bs,
            ROUND_UP(s->l1_size * sizeof(uint64_t), 512));
        if (s->l1_table == NULL) {
            error_setg(errp, "Could not allocate L1 table");
            ret = -ENOMEM;
            goto fail;
        }
        ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
                         s->l1_size * sizeof(uint64_t));
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not read L1 table");
            goto fail;
        }
        for (i = 0; i < s->l1_size; i++) {
            s->l1_table[i] = be64_to_cpu(s->l1_table[i]);
        }
    }

    /* Parse driver-specific options */
    ret = qcow2_update_options(bs, options, flags, errp);
    if (ret < 0) {
        goto fail;
    }

    s->flags = flags;

    ret = qcow2_refcount_init(bs);
    if (ret != 0) {
        error_setg_errno(errp, -ret, "Could not initialize refcount handling");
        goto fail;
    }

    QLIST_INIT(&s->cluster_allocs);
    QTAILQ_INIT(&s->discards);

    /* read qcow2 extensions */
    if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL,
                              flags, &update_header, &local_err)) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    /* Open external data file */
    s->data_file = bdrv_open_child(NULL, options, "data-file", bs, &child_file,
                                   true, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    if (s->incompatible_features & QCOW2_INCOMPAT_DATA_FILE) {
        if (!s->data_file && s->image_data_file) {
            s->data_file = bdrv_open_child(s->image_data_file, options,
                                           "data-file", bs, &child_file,
                                           false, errp);
            if (!s->data_file) {
                ret = -EINVAL;
                goto fail;
            }
        }
        if (!s->data_file) {
            error_setg(errp, "'data-file' is required for this image");
            ret = -EINVAL;
            goto fail;
        }
    } else {
        if (s->data_file) {
            error_setg(errp, "'data-file' can only be set for images with an "
                       "external data file");
            ret = -EINVAL;
            goto fail;
        }

        s->data_file = bs->file;

        if (data_file_is_raw(bs)) {
            error_setg(errp, "data-file-raw requires a data file");
            ret = -EINVAL;
            goto fail;
        }
    }

    /* qcow2_read_extension may have set up the crypto context
     * if the crypt method needs a header region, some methods
     * don't need header extensions, so must check here
     */
    if (s->crypt_method_header && !s->crypto) {
        if (s->crypt_method_header == QCOW_CRYPT_AES) {
            unsigned int cflags = 0;
            if (flags & BDRV_O_NO_IO) {
                cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
            }
            s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
                                           NULL, NULL, cflags,
                                           QCOW2_MAX_THREADS, errp);
            if (!s->crypto) {
                ret = -EINVAL;
                goto fail;
            }
        } else if (!(flags & BDRV_O_NO_IO)) {
            error_setg(errp, "Missing CRYPTO header for crypt method %d",
                       s->crypt_method_header);
            ret = -EINVAL;
            goto fail;
        }
    }

    /* read the backing file name */
    if (header.backing_file_offset != 0) {
        len = header.backing_file_size;
        if (len > MIN(1023, s->cluster_size - header.backing_file_offset) ||
            len >= sizeof(bs->backing_file)) {
            error_setg(errp, "Backing file name too long");
            ret = -EINVAL;
            goto fail;
        }
        ret = bdrv_pread(bs->file, header.backing_file_offset,
                         bs->auto_backing_file, len);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not read backing file name");
            goto fail;
        }
        bs->auto_backing_file[len] = '\0';
        pstrcpy(bs->backing_file, sizeof(bs->backing_file),
                bs->auto_backing_file);
        s->image_backing_file = g_strdup(bs->auto_backing_file);
    }

    /* Internal snapshots */
    s->snapshots_offset = header.snapshots_offset;
    s->nb_snapshots = header.nb_snapshots;

    ret = qcow2_read_snapshots(bs);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read snapshots");
        goto fail;
    }

    /* Clear unknown autoclear feature bits */
    update_header |= s->autoclear_features & ~QCOW2_AUTOCLEAR_MASK;
    update_header =
        update_header && !bs->read_only && !(flags & BDRV_O_INACTIVE);
    if (update_header) {
        s->autoclear_features &= QCOW2_AUTOCLEAR_MASK;
    }

    /* == Handle persistent dirty bitmaps ==
     *
     * We want to load dirty bitmaps in three cases:
     *
     * 1. Normal open of the disk in active mode, not related to invalidation
     *    after migration.
     *
     * 2. Invalidation of the target vm after pre-copy phase of migration, if
     *    bitmaps are _not_ migrating through migration channel, i.e.
     *    'dirty-bitmaps' capability is disabled.
     *
     * 3. Invalidation of source vm after failed or canceled migration.
     *    This is a very interesting case. There are two possible types of
     *    bitmaps:
     *
     *    A. Stored on inactivation and removed. They should be loaded from the
     *       image.
     *
     *    B. Not stored: not-persistent bitmaps and bitmaps, migrated through
     *       the migration channel (with dirty-bitmaps capability).
     *
     *    On the other hand, there are two possible sub-cases:
     *
     *    3.1 disk was changed by somebody else while we were inactive. In this
     *        case all in-RAM dirty bitmaps (both persistent and not) are
     *        definitely invalid. And we don't have any method to determine
     *        this.
     *
     *        Simple and safe thing is to just drop all the bitmaps of type B on
     *        inactivation. But in this case we lose bitmaps in valid 4.2 case.
     *
     *        On the other hand, resuming the source vm when the disk was
     *        already changed is a bad thing anyway: not only the bitmaps,
     *        the whole vm state is out of sync with the disk.
     *
     *        This means that a user or management tool who for some reason
     *        decided to resume the source vm after the disk was already
     *        changed by the target vm should at least drop all dirty bitmaps
     *        by hand.
     *
     *        So, we can ignore this case for now, but TODO: a "generation"
     *        extension for qcow2, to determine that the image was changed
     *        after the last inactivation. And if it was changed, we will drop
     *        (or at least mark as 'invalid') all the bitmaps of type B, both
     *        persistent and not.
     *
     *    3.2 disk was _not_ changed while we were inactive. Bitmaps may be
     *        saved to disk ('dirty-bitmaps' capability disabled), or not saved
     *        ('dirty-bitmaps' capability enabled), but we don't need to care:
     *        let's load bitmaps as always: stored bitmaps will be loaded, and
     *        those not stored have the flag IN_USE=1 in the image and will be
     *        skipped on loading.
     *
     *    One remaining possible case when we don't want to load bitmaps:
     *
     *    4. Open disk in inactive mode in target vm (bitmaps are migrating or
     *       will be loaded on invalidation, no need to try loading them
     *       before)
     */

    if (!(bdrv_get_flags(bs) & BDRV_O_INACTIVE)) {
        /* It's case 1, 2 or 3.2. Or 3.1 which is BUG in management layer. */
        bool header_updated = qcow2_load_dirty_bitmaps(bs, &local_err);

        update_header = update_header && !header_updated;
    }
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    if (update_header) {
        ret = qcow2_update_header(bs);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not update qcow2 header");
            goto fail;
        }
    }

    bs->supported_zero_flags = header.version >= 3 ? BDRV_REQ_MAY_UNMAP : 0;

    /* Repair image if dirty */
    if (!(flags & (BDRV_O_CHECK | BDRV_O_INACTIVE)) && !bs->read_only &&
        (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) {
        BdrvCheckResult result = {0};

        ret = qcow2_co_check_locked(bs, &result,
                                    BDRV_FIX_ERRORS | BDRV_FIX_LEAKS);
        if (ret < 0 || result.check_errors) {
            if (ret >= 0) {
                ret = -EIO;
            }
            error_setg_errno(errp, -ret, "Could not repair dirty image");
            goto fail;
        }
    }

#ifdef DEBUG_ALLOC
    {
        BdrvCheckResult result = {0};
        qcow2_check_refcounts(bs, &result, 0);
    }
#endif

    qemu_co_queue_init(&s->thread_task_queue);

    return ret;

 fail:
    g_free(s->image_data_file);
    if (has_data_file(bs)) {
        bdrv_unref_child(bs, s->data_file);
    }
    g_free(s->unknown_header_fields);
    cleanup_unknown_header_ext(bs);
    qcow2_free_snapshots(bs);
    qcow2_refcount_close(bs);
    qemu_vfree(s->l1_table);
    /* else pre-write overlap checks in cache_destroy may crash */
    s->l1_table = NULL;
    cache_clean_timer_del(bs);
    if (s->l2_table_cache) {
        qcow2_cache_destroy(s->l2_table_cache);
    }
    if (s->refcount_block_cache) {
        qcow2_cache_destroy(s->refcount_block_cache);
    }
    qcrypto_block_free(s->crypto);
    qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
    return ret;
}

typedef struct QCow2OpenCo {
    BlockDriverState *bs;
    QDict *options;
    int flags;
    Error **errp;
    int ret;
} QCow2OpenCo;

static void coroutine_fn qcow2_open_entry(void *opaque)
{
    QCow2OpenCo *qoc = opaque;
    BDRVQcow2State *s = qoc->bs->opaque;

    qemu_co_mutex_lock(&s->lock);
    qoc->ret = qcow2_do_open(qoc->bs, qoc->options, qoc->flags, qoc->errp);
    qemu_co_mutex_unlock(&s->lock);
}

static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
                      Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    QCow2OpenCo qoc = {
        .bs = bs,
        .options = options,
        .flags = flags,
        .errp = errp,
        .ret = -EINPROGRESS
    };

    bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
                               false, errp);
    if (!bs->file) {
        return -EINVAL;
    }

    /* Initialise locks */
    qemu_co_mutex_init(&s->lock);

    if (qemu_in_coroutine()) {
        /* From bdrv_co_create.
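         * We are already in coroutine context here, so the open entry
         * point can be called directly instead of spawning a coroutine
         * and polling for its completion.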
         */
        qcow2_open_entry(&qoc);
    } else {
        assert(qemu_get_current_aio_context() == qemu_get_aio_context());
        qemu_coroutine_enter(qemu_coroutine_create(qcow2_open_entry, &qoc));
        BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);
    }
    return qoc.ret;
}

static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQcow2State *s = bs->opaque;

    if (bs->encrypted) {
        /* Encryption works on a sector granularity */
        bs->bl.request_alignment = qcrypto_block_get_sector_size(s->crypto);
    }
    bs->bl.pwrite_zeroes_alignment = s->cluster_size;
    bs->bl.pdiscard_alignment = s->cluster_size;
}

static int qcow2_reopen_prepare(BDRVReopenState *state,
                                BlockReopenQueue *queue, Error **errp)
{
    Qcow2ReopenState *r;
    int ret;

    r = g_new0(Qcow2ReopenState, 1);
    state->opaque = r;

    ret = qcow2_update_options_prepare(state->bs, r, state->options,
                                       state->flags, errp);
    if (ret < 0) {
        goto fail;
    }

    /* We need to write out any unwritten data if we reopen read-only. */
    if ((state->flags & BDRV_O_RDWR) == 0) {
        ret = qcow2_reopen_bitmaps_ro(state->bs, errp);
        if (ret < 0) {
            goto fail;
        }

        ret = bdrv_flush(state->bs);
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_mark_clean(state->bs);
        if (ret < 0) {
            goto fail;
        }
    }

    return 0;

fail:
    qcow2_update_options_abort(state->bs, r);
    g_free(r);
    return ret;
}

static void qcow2_reopen_commit(BDRVReopenState *state)
{
    qcow2_update_options_commit(state->bs, state->opaque);
    if (state->flags & BDRV_O_RDWR) {
        Error *local_err = NULL;

        if (qcow2_reopen_bitmaps_rw(state->bs, &local_err) < 0) {
            /*
             * This is not fatal: the bitmaps are just left read-only, so all
             * following writes to them will fail. The user can remove the
             * read-only bitmaps to unblock writes, or retry the reopen.
1846 */ 1847 error_reportf_err(local_err, 1848 "%s: Failed to make dirty bitmaps writable: ", 1849 bdrv_get_node_name(state->bs)); 1850 } 1851 } 1852 g_free(state->opaque); 1853 } 1854 1855 static void qcow2_reopen_abort(BDRVReopenState *state) 1856 { 1857 qcow2_update_options_abort(state->bs, state->opaque); 1858 g_free(state->opaque); 1859 } 1860 1861 static void qcow2_join_options(QDict *options, QDict *old_options) 1862 { 1863 bool has_new_overlap_template = 1864 qdict_haskey(options, QCOW2_OPT_OVERLAP) || 1865 qdict_haskey(options, QCOW2_OPT_OVERLAP_TEMPLATE); 1866 bool has_new_total_cache_size = 1867 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE); 1868 bool has_all_cache_options; 1869 1870 /* New overlap template overrides all old overlap options */ 1871 if (has_new_overlap_template) { 1872 qdict_del(old_options, QCOW2_OPT_OVERLAP); 1873 qdict_del(old_options, QCOW2_OPT_OVERLAP_TEMPLATE); 1874 qdict_del(old_options, QCOW2_OPT_OVERLAP_MAIN_HEADER); 1875 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L1); 1876 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L2); 1877 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_TABLE); 1878 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK); 1879 qdict_del(old_options, QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE); 1880 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L1); 1881 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L2); 1882 } 1883 1884 /* New total cache size overrides all old options */ 1885 if (qdict_haskey(options, QCOW2_OPT_CACHE_SIZE)) { 1886 qdict_del(old_options, QCOW2_OPT_L2_CACHE_SIZE); 1887 qdict_del(old_options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 1888 } 1889 1890 qdict_join(options, old_options, false); 1891 1892 /* 1893 * If after merging all cache size options are set, an old total size is 1894 * overwritten. Do keep all options, however, if all three are new. The 1895 * resulting error message is what we want to happen. 
1896 */ 1897 has_all_cache_options = 1898 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE) || 1899 qdict_haskey(options, QCOW2_OPT_L2_CACHE_SIZE) || 1900 qdict_haskey(options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 1901 1902 if (has_all_cache_options && !has_new_total_cache_size) { 1903 qdict_del(options, QCOW2_OPT_CACHE_SIZE); 1904 } 1905 } 1906 1907 static int coroutine_fn qcow2_co_block_status(BlockDriverState *bs, 1908 bool want_zero, 1909 int64_t offset, int64_t count, 1910 int64_t *pnum, int64_t *map, 1911 BlockDriverState **file) 1912 { 1913 BDRVQcow2State *s = bs->opaque; 1914 uint64_t cluster_offset; 1915 int index_in_cluster, ret; 1916 unsigned int bytes; 1917 int status = 0; 1918 1919 qemu_co_mutex_lock(&s->lock); 1920 1921 if (!s->metadata_preallocation_checked) { 1922 ret = qcow2_detect_metadata_preallocation(bs); 1923 s->metadata_preallocation = (ret == 1); 1924 s->metadata_preallocation_checked = true; 1925 } 1926 1927 bytes = MIN(INT_MAX, count); 1928 ret = qcow2_get_cluster_offset(bs, offset, &bytes, &cluster_offset); 1929 qemu_co_mutex_unlock(&s->lock); 1930 if (ret < 0) { 1931 return ret; 1932 } 1933 1934 *pnum = bytes; 1935 1936 if ((ret == QCOW2_CLUSTER_NORMAL || ret == QCOW2_CLUSTER_ZERO_ALLOC) && 1937 !s->crypto) { 1938 index_in_cluster = offset & (s->cluster_size - 1); 1939 *map = cluster_offset | index_in_cluster; 1940 *file = s->data_file->bs; 1941 status |= BDRV_BLOCK_OFFSET_VALID; 1942 } 1943 if (ret == QCOW2_CLUSTER_ZERO_PLAIN || ret == QCOW2_CLUSTER_ZERO_ALLOC) { 1944 status |= BDRV_BLOCK_ZERO; 1945 } else if (ret != QCOW2_CLUSTER_UNALLOCATED) { 1946 status |= BDRV_BLOCK_DATA; 1947 } 1948 if (s->metadata_preallocation && (status & BDRV_BLOCK_DATA) && 1949 (status & BDRV_BLOCK_OFFSET_VALID)) 1950 { 1951 status |= BDRV_BLOCK_RECURSE; 1952 } 1953 return status; 1954 } 1955 1956 static coroutine_fn int qcow2_handle_l2meta(BlockDriverState *bs, 1957 QCowL2Meta **pl2meta, 1958 bool link_l2) 1959 { 1960 int ret = 0; 1961 QCowL2Meta *l2meta = *pl2meta; 1962 1963 while (l2meta != NULL) { 1964 QCowL2Meta *next; 1965 1966 if (link_l2) { 1967 ret = qcow2_alloc_cluster_link_l2(bs, l2meta); 1968 if (ret) { 1969 goto out; 1970 } 1971 } else { 1972 qcow2_alloc_cluster_abort(bs, l2meta); 1973 } 1974 1975 /* Take the request off the list of running requests */ 1976 if (l2meta->nb_clusters != 0) { 1977 QLIST_REMOVE(l2meta, next_in_flight); 1978 } 1979 1980 qemu_co_queue_restart_all(&l2meta->dependent_requests); 1981 1982 next = l2meta->next; 1983 g_free(l2meta); 1984 l2meta = next; 1985 } 1986 out: 1987 *pl2meta = l2meta; 1988 return ret; 1989 } 1990 1991 static coroutine_fn int 1992 qcow2_co_preadv_encrypted(BlockDriverState *bs, 1993 uint64_t file_cluster_offset, 1994 uint64_t offset, 1995 uint64_t bytes, 1996 QEMUIOVector *qiov, 1997 uint64_t qiov_offset) 1998 { 1999 int ret; 2000 BDRVQcow2State *s = bs->opaque; 2001 uint8_t *buf; 2002 2003 assert(bs->encrypted && s->crypto); 2004 assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 2005 2006 /* 2007 * For encrypted images, read everything into a temporary 2008 * contiguous buffer on which the AES functions can work. 2009 * Also, decryption in a separate buffer is better as it 2010 * prevents the guest from learning information about the 2011 * encrypted nature of the virtual disk. 
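* The code below therefore reads the ciphertext from the data file into a bounce buffer, decrypts it in place with qcow2_co_decrypt() and only then copies the plaintext into the guest's I/O vector.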
2012 */ 2013 2014 buf = qemu_try_blockalign(s->data_file->bs, bytes); 2015 if (buf == NULL) { 2016 return -ENOMEM; 2017 } 2018 2019 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); 2020 ret = bdrv_co_pread(s->data_file, 2021 file_cluster_offset + offset_into_cluster(s, offset), 2022 bytes, buf, 0); 2023 if (ret < 0) { 2024 goto fail; 2025 } 2026 2027 assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)); 2028 assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE)); 2029 if (qcow2_co_decrypt(bs, 2030 file_cluster_offset + offset_into_cluster(s, offset), 2031 offset, buf, bytes) < 0) 2032 { 2033 ret = -EIO; 2034 goto fail; 2035 } 2036 qemu_iovec_from_buf(qiov, qiov_offset, buf, bytes); 2037 2038 fail: 2039 qemu_vfree(buf); 2040 2041 return ret; 2042 } 2043 2044 typedef struct Qcow2AioTask { 2045 AioTask task; 2046 2047 BlockDriverState *bs; 2048 QCow2ClusterType cluster_type; /* only for read */ 2049 uint64_t file_cluster_offset; 2050 uint64_t offset; 2051 uint64_t bytes; 2052 QEMUIOVector *qiov; 2053 uint64_t qiov_offset; 2054 QCowL2Meta *l2meta; /* only for write */ 2055 } Qcow2AioTask; 2056 2057 static coroutine_fn int qcow2_co_preadv_task_entry(AioTask *task); 2058 static coroutine_fn int qcow2_add_task(BlockDriverState *bs, 2059 AioTaskPool *pool, 2060 AioTaskFunc func, 2061 QCow2ClusterType cluster_type, 2062 uint64_t file_cluster_offset, 2063 uint64_t offset, 2064 uint64_t bytes, 2065 QEMUIOVector *qiov, 2066 size_t qiov_offset, 2067 QCowL2Meta *l2meta) 2068 { 2069 Qcow2AioTask local_task; 2070 Qcow2AioTask *task = pool ? g_new(Qcow2AioTask, 1) : &local_task; 2071 2072 *task = (Qcow2AioTask) { 2073 .task.func = func, 2074 .bs = bs, 2075 .cluster_type = cluster_type, 2076 .qiov = qiov, 2077 .file_cluster_offset = file_cluster_offset, 2078 .offset = offset, 2079 .bytes = bytes, 2080 .qiov_offset = qiov_offset, 2081 .l2meta = l2meta, 2082 }; 2083 2084 trace_qcow2_add_task(qemu_coroutine_self(), bs, pool, 2085 func == qcow2_co_preadv_task_entry ? 
"read" : "write", 2086 cluster_type, file_cluster_offset, offset, bytes, 2087 qiov, qiov_offset); 2088 2089 if (!pool) { 2090 return func(&task->task); 2091 } 2092 2093 aio_task_pool_start_task(pool, &task->task); 2094 2095 return 0; 2096 } 2097 2098 static coroutine_fn int qcow2_co_preadv_task(BlockDriverState *bs, 2099 QCow2ClusterType cluster_type, 2100 uint64_t file_cluster_offset, 2101 uint64_t offset, uint64_t bytes, 2102 QEMUIOVector *qiov, 2103 size_t qiov_offset) 2104 { 2105 BDRVQcow2State *s = bs->opaque; 2106 int offset_in_cluster = offset_into_cluster(s, offset); 2107 2108 switch (cluster_type) { 2109 case QCOW2_CLUSTER_ZERO_PLAIN: 2110 case QCOW2_CLUSTER_ZERO_ALLOC: 2111 /* Both zero types are handled in qcow2_co_preadv_part */ 2112 g_assert_not_reached(); 2113 2114 case QCOW2_CLUSTER_UNALLOCATED: 2115 assert(bs->backing); /* otherwise handled in qcow2_co_preadv_part */ 2116 2117 BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); 2118 return bdrv_co_preadv_part(bs->backing, offset, bytes, 2119 qiov, qiov_offset, 0); 2120 2121 case QCOW2_CLUSTER_COMPRESSED: 2122 return qcow2_co_preadv_compressed(bs, file_cluster_offset, 2123 offset, bytes, qiov, qiov_offset); 2124 2125 case QCOW2_CLUSTER_NORMAL: 2126 if ((file_cluster_offset & 511) != 0) { 2127 return -EIO; 2128 } 2129 2130 if (bs->encrypted) { 2131 return qcow2_co_preadv_encrypted(bs, file_cluster_offset, 2132 offset, bytes, qiov, qiov_offset); 2133 } 2134 2135 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); 2136 return bdrv_co_preadv_part(s->data_file, 2137 file_cluster_offset + offset_in_cluster, 2138 bytes, qiov, qiov_offset, 0); 2139 2140 default: 2141 g_assert_not_reached(); 2142 } 2143 2144 g_assert_not_reached(); 2145 } 2146 2147 static coroutine_fn int qcow2_co_preadv_task_entry(AioTask *task) 2148 { 2149 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task); 2150 2151 assert(!t->l2meta); 2152 2153 return qcow2_co_preadv_task(t->bs, t->cluster_type, t->file_cluster_offset, 2154 t->offset, t->bytes, t->qiov, t->qiov_offset); 2155 } 2156 2157 static coroutine_fn int qcow2_co_preadv_part(BlockDriverState *bs, 2158 uint64_t offset, uint64_t bytes, 2159 QEMUIOVector *qiov, 2160 size_t qiov_offset, int flags) 2161 { 2162 BDRVQcow2State *s = bs->opaque; 2163 int ret = 0; 2164 unsigned int cur_bytes; /* number of bytes in current iteration */ 2165 uint64_t cluster_offset = 0; 2166 AioTaskPool *aio = NULL; 2167 2168 while (bytes != 0 && aio_task_pool_status(aio) == 0) { 2169 /* prepare next request */ 2170 cur_bytes = MIN(bytes, INT_MAX); 2171 if (s->crypto) { 2172 cur_bytes = MIN(cur_bytes, 2173 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 2174 } 2175 2176 qemu_co_mutex_lock(&s->lock); 2177 ret = qcow2_get_cluster_offset(bs, offset, &cur_bytes, &cluster_offset); 2178 qemu_co_mutex_unlock(&s->lock); 2179 if (ret < 0) { 2180 goto out; 2181 } 2182 2183 if (ret == QCOW2_CLUSTER_ZERO_PLAIN || 2184 ret == QCOW2_CLUSTER_ZERO_ALLOC || 2185 (ret == QCOW2_CLUSTER_UNALLOCATED && !bs->backing)) 2186 { 2187 qemu_iovec_memset(qiov, qiov_offset, 0, cur_bytes); 2188 } else { 2189 if (!aio && cur_bytes != bytes) { 2190 aio = aio_task_pool_new(QCOW2_MAX_WORKERS); 2191 } 2192 ret = qcow2_add_task(bs, aio, qcow2_co_preadv_task_entry, ret, 2193 cluster_offset, offset, cur_bytes, 2194 qiov, qiov_offset, NULL); 2195 if (ret < 0) { 2196 goto out; 2197 } 2198 } 2199 2200 bytes -= cur_bytes; 2201 offset += cur_bytes; 2202 qiov_offset += cur_bytes; 2203 } 2204 2205 out: 2206 if (aio) { 2207 aio_task_pool_wait_all(aio); 2208 if (ret == 0) { 2209 ret = 
aio_task_pool_status(aio); 2210 } 2211 g_free(aio); 2212 } 2213 2214 return ret; 2215 } 2216 2217 /* Check if it's possible to merge a write request with the writing of 2218 * the data from the COW regions */ 2219 static bool merge_cow(uint64_t offset, unsigned bytes, 2220 QEMUIOVector *qiov, size_t qiov_offset, 2221 QCowL2Meta *l2meta) 2222 { 2223 QCowL2Meta *m; 2224 2225 for (m = l2meta; m != NULL; m = m->next) { 2226 /* If both COW regions are empty then there's nothing to merge */ 2227 if (m->cow_start.nb_bytes == 0 && m->cow_end.nb_bytes == 0) { 2228 continue; 2229 } 2230 2231 /* If COW regions are handled already, skip this too */ 2232 if (m->skip_cow) { 2233 continue; 2234 } 2235 2236 /* The data (middle) region must be immediately after the 2237 * start region */ 2238 if (l2meta_cow_start(m) + m->cow_start.nb_bytes != offset) { 2239 continue; 2240 } 2241 2242 /* The end region must be immediately after the data (middle) 2243 * region */ 2244 if (m->offset + m->cow_end.offset != offset + bytes) { 2245 continue; 2246 } 2247 2248 /* Make sure that adding both COW regions to the QEMUIOVector 2249 * does not exceed IOV_MAX */ 2250 if (qemu_iovec_subvec_niov(qiov, qiov_offset, bytes) > IOV_MAX - 2) { 2251 continue; 2252 } 2253 2254 m->data_qiov = qiov; 2255 m->data_qiov_offset = qiov_offset; 2256 return true; 2257 } 2258 2259 return false; 2260 } 2261 2262 static bool is_unallocated(BlockDriverState *bs, int64_t offset, int64_t bytes) 2263 { 2264 int64_t nr; 2265 return !bytes || 2266 (!bdrv_is_allocated_above(bs, NULL, false, offset, bytes, &nr) && 2267 nr == bytes); 2268 } 2269 2270 static bool is_zero_cow(BlockDriverState *bs, QCowL2Meta *m) 2271 { 2272 /* 2273 * This check is designed for optimization shortcut so it must be 2274 * efficient. 2275 * Instead of is_zero(), use is_unallocated() as it is faster (but not 2276 * as accurate and can result in false negatives). 2277 */ 2278 return is_unallocated(bs, m->offset + m->cow_start.offset, 2279 m->cow_start.nb_bytes) && 2280 is_unallocated(bs, m->offset + m->cow_end.offset, 2281 m->cow_end.nb_bytes); 2282 } 2283 2284 static int handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta) 2285 { 2286 BDRVQcow2State *s = bs->opaque; 2287 QCowL2Meta *m; 2288 2289 if (!(s->data_file->bs->supported_zero_flags & BDRV_REQ_NO_FALLBACK)) { 2290 return 0; 2291 } 2292 2293 if (bs->encrypted) { 2294 return 0; 2295 } 2296 2297 for (m = l2meta; m != NULL; m = m->next) { 2298 int ret; 2299 2300 if (!m->cow_start.nb_bytes && !m->cow_end.nb_bytes) { 2301 continue; 2302 } 2303 2304 if (!is_zero_cow(bs, m)) { 2305 continue; 2306 } 2307 2308 /* 2309 * instead of writing zero COW buffers, 2310 * efficiently zero out the whole clusters 2311 */ 2312 2313 ret = qcow2_pre_write_overlap_check(bs, 0, m->alloc_offset, 2314 m->nb_clusters * s->cluster_size, 2315 true); 2316 if (ret < 0) { 2317 return ret; 2318 } 2319 2320 BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_SPACE); 2321 ret = bdrv_co_pwrite_zeroes(s->data_file, m->alloc_offset, 2322 m->nb_clusters * s->cluster_size, 2323 BDRV_REQ_NO_FALLBACK); 2324 if (ret < 0) { 2325 if (ret != -ENOTSUP && ret != -EAGAIN) { 2326 return ret; 2327 } 2328 continue; 2329 } 2330 2331 trace_qcow2_skip_cow(qemu_coroutine_self(), m->offset, m->nb_clusters); 2332 m->skip_cow = true; 2333 } 2334 return 0; 2335 } 2336 2337 /* 2338 * qcow2_co_pwritev_task 2339 * Called with s->lock unlocked 2340 * l2meta - if not NULL, qcow2_co_pwritev_task() will consume it. 
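* It is consumed (and freed) on both the success and the error path.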
Caller must 2341 * not use it in any way after the qcow2_co_pwritev_task() call 2342 */ 2343 static coroutine_fn int qcow2_co_pwritev_task(BlockDriverState *bs, 2344 uint64_t file_cluster_offset, 2345 uint64_t offset, uint64_t bytes, 2346 QEMUIOVector *qiov, 2347 uint64_t qiov_offset, 2348 QCowL2Meta *l2meta) 2349 { 2350 int ret; 2351 BDRVQcow2State *s = bs->opaque; 2352 void *crypt_buf = NULL; 2353 int offset_in_cluster = offset_into_cluster(s, offset); 2354 QEMUIOVector encrypted_qiov; 2355 2356 if (bs->encrypted) { 2357 assert(s->crypto); 2358 assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 2359 crypt_buf = qemu_try_blockalign(bs->file->bs, bytes); 2360 if (crypt_buf == NULL) { 2361 ret = -ENOMEM; 2362 goto out_unlocked; 2363 } 2364 qemu_iovec_to_buf(qiov, qiov_offset, crypt_buf, bytes); 2365 2366 if (qcow2_co_encrypt(bs, file_cluster_offset + offset_in_cluster, 2367 offset, crypt_buf, bytes) < 0) 2368 { 2369 ret = -EIO; 2370 goto out_unlocked; 2371 } 2372 2373 qemu_iovec_init_buf(&encrypted_qiov, crypt_buf, bytes); 2374 qiov = &encrypted_qiov; 2375 qiov_offset = 0; 2376 } 2377 2378 /* Try to efficiently initialize the physical space with zeroes */ 2379 ret = handle_alloc_space(bs, l2meta); 2380 if (ret < 0) { 2381 goto out_unlocked; 2382 } 2383 2384 /* 2385 * If we need to do COW, check if it's possible to merge the 2386 * writing of the guest data together with that of the COW regions. 2387 * If it's not possible (or not necessary) then write the 2388 * guest data now. 2389 */ 2390 if (!merge_cow(offset, bytes, qiov, qiov_offset, l2meta)) { 2391 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); 2392 trace_qcow2_writev_data(qemu_coroutine_self(), 2393 file_cluster_offset + offset_in_cluster); 2394 ret = bdrv_co_pwritev_part(s->data_file, 2395 file_cluster_offset + offset_in_cluster, 2396 bytes, qiov, qiov_offset, 0); 2397 if (ret < 0) { 2398 goto out_unlocked; 2399 } 2400 } 2401 2402 qemu_co_mutex_lock(&s->lock); 2403 2404 ret = qcow2_handle_l2meta(bs, &l2meta, true); 2405 goto out_locked; 2406 2407 out_unlocked: 2408 qemu_co_mutex_lock(&s->lock); 2409 2410 out_locked: 2411 qcow2_handle_l2meta(bs, &l2meta, false); 2412 qemu_co_mutex_unlock(&s->lock); 2413 2414 qemu_vfree(crypt_buf); 2415 2416 return ret; 2417 } 2418 2419 static coroutine_fn int qcow2_co_pwritev_task_entry(AioTask *task) 2420 { 2421 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task); 2422 2423 assert(!t->cluster_type); 2424 2425 return qcow2_co_pwritev_task(t->bs, t->file_cluster_offset, 2426 t->offset, t->bytes, t->qiov, t->qiov_offset, 2427 t->l2meta); 2428 } 2429 2430 static coroutine_fn int qcow2_co_pwritev_part( 2431 BlockDriverState *bs, uint64_t offset, uint64_t bytes, 2432 QEMUIOVector *qiov, size_t qiov_offset, int flags) 2433 { 2434 BDRVQcow2State *s = bs->opaque; 2435 int offset_in_cluster; 2436 int ret; 2437 unsigned int cur_bytes; /* number of bytes in current iteration */ 2438 uint64_t cluster_offset; 2439 QCowL2Meta *l2meta = NULL; 2440 AioTaskPool *aio = NULL; 2441 2442 trace_qcow2_writev_start_req(qemu_coroutine_self(), offset, bytes); 2443 2444 while (bytes != 0 && aio_task_pool_status(aio) == 0) { 2445 2446 l2meta = NULL; 2447 2448 trace_qcow2_writev_start_part(qemu_coroutine_self()); 2449 offset_in_cluster = offset_into_cluster(s, offset); 2450 cur_bytes = MIN(bytes, INT_MAX); 2451 if (bs->encrypted) { 2452 cur_bytes = MIN(cur_bytes, 2453 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size 2454 - offset_in_cluster); 2455 } 2456 2457 qemu_co_mutex_lock(&s->lock); 2458 2459 ret =
qcow2_alloc_cluster_offset(bs, offset, &cur_bytes, 2460 &cluster_offset, &l2meta); 2461 if (ret < 0) { 2462 goto out_locked; 2463 } 2464 2465 assert((cluster_offset & 511) == 0); 2466 2467 ret = qcow2_pre_write_overlap_check(bs, 0, 2468 cluster_offset + offset_in_cluster, 2469 cur_bytes, true); 2470 if (ret < 0) { 2471 goto out_locked; 2472 } 2473 2474 qemu_co_mutex_unlock(&s->lock); 2475 2476 if (!aio && cur_bytes != bytes) { 2477 aio = aio_task_pool_new(QCOW2_MAX_WORKERS); 2478 } 2479 ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_task_entry, 0, 2480 cluster_offset, offset, cur_bytes, 2481 qiov, qiov_offset, l2meta); 2482 l2meta = NULL; /* l2meta is consumed by qcow2_co_pwritev_task() */ 2483 if (ret < 0) { 2484 goto fail_nometa; 2485 } 2486 2487 bytes -= cur_bytes; 2488 offset += cur_bytes; 2489 qiov_offset += cur_bytes; 2490 trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes); 2491 } 2492 ret = 0; 2493 2494 qemu_co_mutex_lock(&s->lock); 2495 2496 out_locked: 2497 qcow2_handle_l2meta(bs, &l2meta, false); 2498 2499 qemu_co_mutex_unlock(&s->lock); 2500 2501 fail_nometa: 2502 if (aio) { 2503 aio_task_pool_wait_all(aio); 2504 if (ret == 0) { 2505 ret = aio_task_pool_status(aio); 2506 } 2507 g_free(aio); 2508 } 2509 2510 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); 2511 2512 return ret; 2513 } 2514 2515 static int qcow2_inactivate(BlockDriverState *bs) 2516 { 2517 BDRVQcow2State *s = bs->opaque; 2518 int ret, result = 0; 2519 Error *local_err = NULL; 2520 2521 qcow2_store_persistent_dirty_bitmaps(bs, true, &local_err); 2522 if (local_err != NULL) { 2523 result = -EINVAL; 2524 error_reportf_err(local_err, "Lost persistent bitmaps during " 2525 "inactivation of node '%s': ", 2526 bdrv_get_device_or_node_name(bs)); 2527 } 2528 2529 ret = qcow2_cache_flush(bs, s->l2_table_cache); 2530 if (ret) { 2531 result = ret; 2532 error_report("Failed to flush the L2 table cache: %s", 2533 strerror(-ret)); 2534 } 2535 2536 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 2537 if (ret) { 2538 result = ret; 2539 error_report("Failed to flush the refcount block cache: %s", 2540 strerror(-ret)); 2541 } 2542 2543 if (result == 0) { 2544 qcow2_mark_clean(bs); 2545 } 2546 2547 return result; 2548 } 2549 2550 static void qcow2_close(BlockDriverState *bs) 2551 { 2552 BDRVQcow2State *s = bs->opaque; 2553 qemu_vfree(s->l1_table); 2554 /* else pre-write overlap checks in cache_destroy may crash */ 2555 s->l1_table = NULL; 2556 2557 if (!(s->flags & BDRV_O_INACTIVE)) { 2558 qcow2_inactivate(bs); 2559 } 2560 2561 cache_clean_timer_del(bs); 2562 qcow2_cache_destroy(s->l2_table_cache); 2563 qcow2_cache_destroy(s->refcount_block_cache); 2564 2565 qcrypto_block_free(s->crypto); 2566 s->crypto = NULL; 2567 2568 g_free(s->unknown_header_fields); 2569 cleanup_unknown_header_ext(bs); 2570 2571 g_free(s->image_data_file); 2572 g_free(s->image_backing_file); 2573 g_free(s->image_backing_format); 2574 2575 if (has_data_file(bs)) { 2576 bdrv_unref_child(bs, s->data_file); 2577 } 2578 2579 qcow2_refcount_close(bs); 2580 qcow2_free_snapshots(bs); 2581 } 2582 2583 static void coroutine_fn qcow2_co_invalidate_cache(BlockDriverState *bs, 2584 Error **errp) 2585 { 2586 BDRVQcow2State *s = bs->opaque; 2587 int flags = s->flags; 2588 QCryptoBlock *crypto = NULL; 2589 QDict *options; 2590 Error *local_err = NULL; 2591 int ret; 2592 2593 /* 2594 * Backing files are read-only which makes all of their metadata immutable, 2595 * that means we don't have to worry about reopening them here. 
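* The encryption context is detached from the state before qcow2_close() and re-attached once qcow2_do_open() has succeeded, so it survives the close/open cycle below.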
2596 */ 2597 2598 crypto = s->crypto; 2599 s->crypto = NULL; 2600 2601 qcow2_close(bs); 2602 2603 memset(s, 0, sizeof(BDRVQcow2State)); 2604 options = qdict_clone_shallow(bs->options); 2605 2606 flags &= ~BDRV_O_INACTIVE; 2607 qemu_co_mutex_lock(&s->lock); 2608 ret = qcow2_do_open(bs, options, flags, &local_err); 2609 qemu_co_mutex_unlock(&s->lock); 2610 qobject_unref(options); 2611 if (local_err) { 2612 error_propagate_prepend(errp, local_err, 2613 "Could not reopen qcow2 layer: "); 2614 bs->drv = NULL; 2615 return; 2616 } else if (ret < 0) { 2617 error_setg_errno(errp, -ret, "Could not reopen qcow2 layer"); 2618 bs->drv = NULL; 2619 return; 2620 } 2621 2622 s->crypto = crypto; 2623 } 2624 2625 static size_t header_ext_add(char *buf, uint32_t magic, const void *s, 2626 size_t len, size_t buflen) 2627 { 2628 QCowExtension *ext_backing_fmt = (QCowExtension*) buf; 2629 size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7); 2630 2631 if (buflen < ext_len) { 2632 return -ENOSPC; 2633 } 2634 2635 *ext_backing_fmt = (QCowExtension) { 2636 .magic = cpu_to_be32(magic), 2637 .len = cpu_to_be32(len), 2638 }; 2639 2640 if (len) { 2641 memcpy(buf + sizeof(QCowExtension), s, len); 2642 } 2643 2644 return ext_len; 2645 } 2646 2647 /* 2648 * Updates the qcow2 header, including the variable length parts of it, i.e. 2649 * the backing file name and all extensions. qcow2 was not designed to allow 2650 * such changes, so if we run out of space (we can only use the first cluster) 2651 * this function may fail. 2652 * 2653 * Returns 0 on success, -errno in error cases. 2654 */ 2655 int qcow2_update_header(BlockDriverState *bs) 2656 { 2657 BDRVQcow2State *s = bs->opaque; 2658 QCowHeader *header; 2659 char *buf; 2660 size_t buflen = s->cluster_size; 2661 int ret; 2662 uint64_t total_size; 2663 uint32_t refcount_table_clusters; 2664 size_t header_length; 2665 Qcow2UnknownHeaderExtension *uext; 2666 2667 buf = qemu_blockalign(bs, buflen); 2668 2669 /* Header structure */ 2670 header = (QCowHeader*) buf; 2671 2672 if (buflen < sizeof(*header)) { 2673 ret = -ENOSPC; 2674 goto fail; 2675 } 2676 2677 header_length = sizeof(*header) + s->unknown_header_fields_size; 2678 total_size = bs->total_sectors * BDRV_SECTOR_SIZE; 2679 refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3); 2680 2681 *header = (QCowHeader) { 2682 /* Version 2 fields */ 2683 .magic = cpu_to_be32(QCOW_MAGIC), 2684 .version = cpu_to_be32(s->qcow_version), 2685 .backing_file_offset = 0, 2686 .backing_file_size = 0, 2687 .cluster_bits = cpu_to_be32(s->cluster_bits), 2688 .size = cpu_to_be64(total_size), 2689 .crypt_method = cpu_to_be32(s->crypt_method_header), 2690 .l1_size = cpu_to_be32(s->l1_size), 2691 .l1_table_offset = cpu_to_be64(s->l1_table_offset), 2692 .refcount_table_offset = cpu_to_be64(s->refcount_table_offset), 2693 .refcount_table_clusters = cpu_to_be32(refcount_table_clusters), 2694 .nb_snapshots = cpu_to_be32(s->nb_snapshots), 2695 .snapshots_offset = cpu_to_be64(s->snapshots_offset), 2696 2697 /* Version 3 fields */ 2698 .incompatible_features = cpu_to_be64(s->incompatible_features), 2699 .compatible_features = cpu_to_be64(s->compatible_features), 2700 .autoclear_features = cpu_to_be64(s->autoclear_features), 2701 .refcount_order = cpu_to_be32(s->refcount_order), 2702 .header_length = cpu_to_be32(header_length), 2703 }; 2704 2705 /* For older versions, write a shorter header */ 2706 switch (s->qcow_version) { 2707 case 2: 2708 ret = offsetof(QCowHeader, incompatible_features); 2709 break; 2710 case 3: 2711 ret = 
sizeof(*header); 2712 break; 2713 default: 2714 ret = -EINVAL; 2715 goto fail; 2716 } 2717 2718 buf += ret; 2719 buflen -= ret; 2720 memset(buf, 0, buflen); 2721 2722 /* Preserve any unknown field in the header */ 2723 if (s->unknown_header_fields_size) { 2724 if (buflen < s->unknown_header_fields_size) { 2725 ret = -ENOSPC; 2726 goto fail; 2727 } 2728 2729 memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size); 2730 buf += s->unknown_header_fields_size; 2731 buflen -= s->unknown_header_fields_size; 2732 } 2733 2734 /* Backing file format header extension */ 2735 if (s->image_backing_format) { 2736 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT, 2737 s->image_backing_format, 2738 strlen(s->image_backing_format), 2739 buflen); 2740 if (ret < 0) { 2741 goto fail; 2742 } 2743 2744 buf += ret; 2745 buflen -= ret; 2746 } 2747 2748 /* External data file header extension */ 2749 if (has_data_file(bs) && s->image_data_file) { 2750 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_DATA_FILE, 2751 s->image_data_file, strlen(s->image_data_file), 2752 buflen); 2753 if (ret < 0) { 2754 goto fail; 2755 } 2756 2757 buf += ret; 2758 buflen -= ret; 2759 } 2760 2761 /* Full disk encryption header pointer extension */ 2762 if (s->crypto_header.offset != 0) { 2763 s->crypto_header.offset = cpu_to_be64(s->crypto_header.offset); 2764 s->crypto_header.length = cpu_to_be64(s->crypto_header.length); 2765 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_CRYPTO_HEADER, 2766 &s->crypto_header, sizeof(s->crypto_header), 2767 buflen); 2768 s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset); 2769 s->crypto_header.length = be64_to_cpu(s->crypto_header.length); 2770 if (ret < 0) { 2771 goto fail; 2772 } 2773 buf += ret; 2774 buflen -= ret; 2775 } 2776 2777 /* Feature table */ 2778 if (s->qcow_version >= 3) { 2779 Qcow2Feature features[] = { 2780 { 2781 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2782 .bit = QCOW2_INCOMPAT_DIRTY_BITNR, 2783 .name = "dirty bit", 2784 }, 2785 { 2786 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2787 .bit = QCOW2_INCOMPAT_CORRUPT_BITNR, 2788 .name = "corrupt bit", 2789 }, 2790 { 2791 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2792 .bit = QCOW2_INCOMPAT_DATA_FILE_BITNR, 2793 .name = "external data file", 2794 }, 2795 { 2796 .type = QCOW2_FEAT_TYPE_COMPATIBLE, 2797 .bit = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR, 2798 .name = "lazy refcounts", 2799 }, 2800 }; 2801 2802 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE, 2803 features, sizeof(features), buflen); 2804 if (ret < 0) { 2805 goto fail; 2806 } 2807 buf += ret; 2808 buflen -= ret; 2809 } 2810 2811 /* Bitmap extension */ 2812 if (s->nb_bitmaps > 0) { 2813 Qcow2BitmapHeaderExt bitmaps_header = { 2814 .nb_bitmaps = cpu_to_be32(s->nb_bitmaps), 2815 .bitmap_directory_size = 2816 cpu_to_be64(s->bitmap_directory_size), 2817 .bitmap_directory_offset = 2818 cpu_to_be64(s->bitmap_directory_offset) 2819 }; 2820 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BITMAPS, 2821 &bitmaps_header, sizeof(bitmaps_header), 2822 buflen); 2823 if (ret < 0) { 2824 goto fail; 2825 } 2826 buf += ret; 2827 buflen -= ret; 2828 } 2829 2830 /* Keep unknown header extensions */ 2831 QLIST_FOREACH(uext, &s->unknown_header_ext, next) { 2832 ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen); 2833 if (ret < 0) { 2834 goto fail; 2835 } 2836 2837 buf += ret; 2838 buflen -= ret; 2839 } 2840 2841 /* End of header extensions */ 2842 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen); 2843 if (ret < 0) { 2844 goto fail; 2845 } 2846 2847 buf += ret; 
2848 buflen -= ret; 2849 2850 /* Backing file name */ 2851 if (s->image_backing_file) { 2852 size_t backing_file_len = strlen(s->image_backing_file); 2853 2854 if (buflen < backing_file_len) { 2855 ret = -ENOSPC; 2856 goto fail; 2857 } 2858 2859 /* Using strncpy is ok here, since buf is not NUL-terminated. */ 2860 strncpy(buf, s->image_backing_file, buflen); 2861 2862 header->backing_file_offset = cpu_to_be64(buf - ((char*) header)); 2863 header->backing_file_size = cpu_to_be32(backing_file_len); 2864 } 2865 2866 /* Write the new header */ 2867 ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size); 2868 if (ret < 0) { 2869 goto fail; 2870 } 2871 2872 ret = 0; 2873 fail: 2874 qemu_vfree(header); 2875 return ret; 2876 } 2877 2878 static int qcow2_change_backing_file(BlockDriverState *bs, 2879 const char *backing_file, const char *backing_fmt) 2880 { 2881 BDRVQcow2State *s = bs->opaque; 2882 2883 /* Adding a backing file means that the external data file alone won't be 2884 * enough to make sense of the content */ 2885 if (backing_file && data_file_is_raw(bs)) { 2886 return -EINVAL; 2887 } 2888 2889 if (backing_file && strlen(backing_file) > 1023) { 2890 return -EINVAL; 2891 } 2892 2893 pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file), 2894 backing_file ?: ""); 2895 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: ""); 2896 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: ""); 2897 2898 g_free(s->image_backing_file); 2899 g_free(s->image_backing_format); 2900 2901 s->image_backing_file = backing_file ? g_strdup(bs->backing_file) : NULL; 2902 s->image_backing_format = backing_fmt ? g_strdup(bs->backing_format) : NULL; 2903 2904 return qcow2_update_header(bs); 2905 } 2906 2907 static int qcow2_crypt_method_from_format(const char *encryptfmt) 2908 { 2909 if (g_str_equal(encryptfmt, "luks")) { 2910 return QCOW_CRYPT_LUKS; 2911 } else if (g_str_equal(encryptfmt, "aes")) { 2912 return QCOW_CRYPT_AES; 2913 } else { 2914 return -EINVAL; 2915 } 2916 } 2917 2918 static int qcow2_set_up_encryption(BlockDriverState *bs, 2919 QCryptoBlockCreateOptions *cryptoopts, 2920 Error **errp) 2921 { 2922 BDRVQcow2State *s = bs->opaque; 2923 QCryptoBlock *crypto = NULL; 2924 int fmt, ret; 2925 2926 switch (cryptoopts->format) { 2927 case Q_CRYPTO_BLOCK_FORMAT_LUKS: 2928 fmt = QCOW_CRYPT_LUKS; 2929 break; 2930 case Q_CRYPTO_BLOCK_FORMAT_QCOW: 2931 fmt = QCOW_CRYPT_AES; 2932 break; 2933 default: 2934 error_setg(errp, "Crypto format not supported in qcow2"); 2935 return -EINVAL; 2936 } 2937 2938 s->crypt_method_header = fmt; 2939 2940 crypto = qcrypto_block_create(cryptoopts, "encrypt.", 2941 qcow2_crypto_hdr_init_func, 2942 qcow2_crypto_hdr_write_func, 2943 bs, errp); 2944 if (!crypto) { 2945 return -EINVAL; 2946 } 2947 2948 ret = qcow2_update_header(bs); 2949 if (ret < 0) { 2950 error_setg_errno(errp, -ret, "Could not write encryption header"); 2951 goto out; 2952 } 2953 2954 ret = 0; 2955 out: 2956 qcrypto_block_free(crypto); 2957 return ret; 2958 } 2959 2960 /** 2961 * Preallocates metadata structures for data clusters between @offset (in the 2962 * guest disk) and @new_length (which is thus generally the new guest disk 2963 * size). 2964 * 2965 * Returns: 0 on success, -errno on failure. 
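* The loop below only allocates and links cluster metadata; afterwards the underlying data file is extended up to the last allocated cluster so that reads within the preallocated range do not fail at EOF.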
2966 */ 2967 static int coroutine_fn preallocate_co(BlockDriverState *bs, uint64_t offset, 2968 uint64_t new_length, PreallocMode mode, 2969 Error **errp) 2970 { 2971 BDRVQcow2State *s = bs->opaque; 2972 uint64_t bytes; 2973 uint64_t host_offset = 0; 2974 int64_t file_length; 2975 unsigned int cur_bytes; 2976 int ret; 2977 QCowL2Meta *meta; 2978 2979 assert(offset <= new_length); 2980 bytes = new_length - offset; 2981 2982 while (bytes) { 2983 cur_bytes = MIN(bytes, QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size)); 2984 ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes, 2985 &host_offset, &meta); 2986 if (ret < 0) { 2987 error_setg_errno(errp, -ret, "Allocating clusters failed"); 2988 return ret; 2989 } 2990 2991 while (meta) { 2992 QCowL2Meta *next = meta->next; 2993 2994 ret = qcow2_alloc_cluster_link_l2(bs, meta); 2995 if (ret < 0) { 2996 error_setg_errno(errp, -ret, "Mapping clusters failed"); 2997 qcow2_free_any_clusters(bs, meta->alloc_offset, 2998 meta->nb_clusters, QCOW2_DISCARD_NEVER); 2999 return ret; 3000 } 3001 3002 /* There are no dependent requests, but we need to remove our 3003 * request from the list of in-flight requests */ 3004 QLIST_REMOVE(meta, next_in_flight); 3005 3006 g_free(meta); 3007 meta = next; 3008 } 3009 3010 /* TODO Preallocate data if requested */ 3011 3012 bytes -= cur_bytes; 3013 offset += cur_bytes; 3014 } 3015 3016 /* 3017 * It is expected that the image file is large enough to actually contain 3018 * all of the allocated clusters (otherwise we get failing reads after 3019 * EOF). Extend the image to the last allocated sector. 3020 */ 3021 file_length = bdrv_getlength(s->data_file->bs); 3022 if (file_length < 0) { 3023 error_setg_errno(errp, -file_length, "Could not get file size"); 3024 return file_length; 3025 } 3026 3027 if (host_offset + cur_bytes > file_length) { 3028 if (mode == PREALLOC_MODE_METADATA) { 3029 mode = PREALLOC_MODE_OFF; 3030 } 3031 ret = bdrv_co_truncate(s->data_file, host_offset + cur_bytes, mode, 3032 errp); 3033 if (ret < 0) { 3034 return ret; 3035 } 3036 } 3037 3038 return 0; 3039 } 3040 3041 /* qcow2_refcount_metadata_size: 3042 * @clusters: number of clusters to refcount (including data and L1/L2 tables) 3043 * @cluster_size: size of a cluster, in bytes 3044 * @refcount_order: refcount bits power-of-2 exponent 3045 * @generous_increase: allow for the refcount table to be 1.5x as large as it 3046 * needs to be 3047 * 3048 * Returns: Number of bytes required for refcount blocks and table metadata. 3049 */ 3050 int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size, 3051 int refcount_order, bool generous_increase, 3052 uint64_t *refblock_count) 3053 { 3054 /* 3055 * Every host cluster is reference-counted, including metadata (even 3056 * refcount metadata is recursively included). 3057 * 3058 * An accurate formula for the size of refcount metadata size is difficult 3059 * to derive. An easier method of calculation is finding the fixed point 3060 * where no further refcount blocks or table clusters are required to 3061 * reference count every cluster. 
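* Each iteration of the loop below recomputes how many refcount blocks are needed to cover the data and metadata clusters of the previous iteration, and how many table clusters are needed to reference those blocks; it stops once the totals no longer change.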
3062 */ 3063 int64_t blocks_per_table_cluster = cluster_size / sizeof(uint64_t); 3064 int64_t refcounts_per_block = cluster_size * 8 / (1 << refcount_order); 3065 int64_t table = 0; /* number of refcount table clusters */ 3066 int64_t blocks = 0; /* number of refcount block clusters */ 3067 int64_t last; 3068 int64_t n = 0; 3069 3070 do { 3071 last = n; 3072 blocks = DIV_ROUND_UP(clusters + table + blocks, refcounts_per_block); 3073 table = DIV_ROUND_UP(blocks, blocks_per_table_cluster); 3074 n = clusters + blocks + table; 3075 3076 if (n == last && generous_increase) { 3077 clusters += DIV_ROUND_UP(table, 2); 3078 n = 0; /* force another loop */ 3079 generous_increase = false; 3080 } 3081 } while (n != last); 3082 3083 if (refblock_count) { 3084 *refblock_count = blocks; 3085 } 3086 3087 return (blocks + table) * cluster_size; 3088 } 3089 3090 /** 3091 * qcow2_calc_prealloc_size: 3092 * @total_size: virtual disk size in bytes 3093 * @cluster_size: cluster size in bytes 3094 * @refcount_order: refcount bits power-of-2 exponent 3095 * 3096 * Returns: Total number of bytes required for the fully allocated image 3097 * (including metadata). 3098 */ 3099 static int64_t qcow2_calc_prealloc_size(int64_t total_size, 3100 size_t cluster_size, 3101 int refcount_order) 3102 { 3103 int64_t meta_size = 0; 3104 uint64_t nl1e, nl2e; 3105 int64_t aligned_total_size = ROUND_UP(total_size, cluster_size); 3106 3107 /* header: 1 cluster */ 3108 meta_size += cluster_size; 3109 3110 /* total size of L2 tables */ 3111 nl2e = aligned_total_size / cluster_size; 3112 nl2e = ROUND_UP(nl2e, cluster_size / sizeof(uint64_t)); 3113 meta_size += nl2e * sizeof(uint64_t); 3114 3115 /* total size of L1 tables */ 3116 nl1e = nl2e * sizeof(uint64_t) / cluster_size; 3117 nl1e = ROUND_UP(nl1e, cluster_size / sizeof(uint64_t)); 3118 meta_size += nl1e * sizeof(uint64_t); 3119 3120 /* total size of refcount table and blocks */ 3121 meta_size += qcow2_refcount_metadata_size( 3122 (meta_size + aligned_total_size) / cluster_size, 3123 cluster_size, refcount_order, false, NULL); 3124 3125 return meta_size + aligned_total_size; 3126 } 3127 3128 static bool validate_cluster_size(size_t cluster_size, Error **errp) 3129 { 3130 int cluster_bits = ctz32(cluster_size); 3131 if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS || 3132 (1 << cluster_bits) != cluster_size) 3133 { 3134 error_setg(errp, "Cluster size must be a power of two between %d and " 3135 "%dk", 1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10)); 3136 return false; 3137 } 3138 return true; 3139 } 3140 3141 static size_t qcow2_opt_get_cluster_size_del(QemuOpts *opts, Error **errp) 3142 { 3143 size_t cluster_size; 3144 3145 cluster_size = qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE, 3146 DEFAULT_CLUSTER_SIZE); 3147 if (!validate_cluster_size(cluster_size, errp)) { 3148 return 0; 3149 } 3150 return cluster_size; 3151 } 3152 3153 static int qcow2_opt_get_version_del(QemuOpts *opts, Error **errp) 3154 { 3155 char *buf; 3156 int ret; 3157 3158 buf = qemu_opt_get_del(opts, BLOCK_OPT_COMPAT_LEVEL); 3159 if (!buf) { 3160 ret = 3; /* default */ 3161 } else if (!strcmp(buf, "0.10")) { 3162 ret = 2; 3163 } else if (!strcmp(buf, "1.1")) { 3164 ret = 3; 3165 } else { 3166 error_setg(errp, "Invalid compatibility level: '%s'", buf); 3167 ret = -EINVAL; 3168 } 3169 g_free(buf); 3170 return ret; 3171 } 3172 3173 static uint64_t qcow2_opt_get_refcount_bits_del(QemuOpts *opts, int version, 3174 Error **errp) 3175 { 3176 uint64_t refcount_bits; 3177 3178 
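/* The default refcount width is 16 bits; any width must be a power of two no larger than 64, and widths other than 16 additionally require a v3 image (both are checked below). */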
refcount_bits = qemu_opt_get_number_del(opts, BLOCK_OPT_REFCOUNT_BITS, 16); 3179 if (refcount_bits > 64 || !is_power_of_2(refcount_bits)) { 3180 error_setg(errp, "Refcount width must be a power of two and may not " 3181 "exceed 64 bits"); 3182 return 0; 3183 } 3184 3185 if (version < 3 && refcount_bits != 16) { 3186 error_setg(errp, "Different refcount widths than 16 bits require " 3187 "compatibility level 1.1 or above (use compat=1.1 or " 3188 "greater)"); 3189 return 0; 3190 } 3191 3192 return refcount_bits; 3193 } 3194 3195 static int coroutine_fn 3196 qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) 3197 { 3198 BlockdevCreateOptionsQcow2 *qcow2_opts; 3199 QDict *options; 3200 3201 /* 3202 * Open the image file and write a minimal qcow2 header. 3203 * 3204 * We keep things simple and start with a zero-sized image. We also 3205 * do without refcount blocks or a L1 table for now. We'll fix the 3206 * inconsistency later. 3207 * 3208 * We do need a refcount table because growing the refcount table means 3209 * allocating two new refcount blocks - the seconds of which would be at 3210 * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file 3211 * size for any qcow2 image. 3212 */ 3213 BlockBackend *blk = NULL; 3214 BlockDriverState *bs = NULL; 3215 BlockDriverState *data_bs = NULL; 3216 QCowHeader *header; 3217 size_t cluster_size; 3218 int version; 3219 int refcount_order; 3220 uint64_t* refcount_table; 3221 Error *local_err = NULL; 3222 int ret; 3223 3224 assert(create_options->driver == BLOCKDEV_DRIVER_QCOW2); 3225 qcow2_opts = &create_options->u.qcow2; 3226 3227 bs = bdrv_open_blockdev_ref(qcow2_opts->file, errp); 3228 if (bs == NULL) { 3229 return -EIO; 3230 } 3231 3232 /* Validate options and set default values */ 3233 if (!QEMU_IS_ALIGNED(qcow2_opts->size, BDRV_SECTOR_SIZE)) { 3234 error_setg(errp, "Image size must be a multiple of 512 bytes"); 3235 ret = -EINVAL; 3236 goto out; 3237 } 3238 3239 if (qcow2_opts->has_version) { 3240 switch (qcow2_opts->version) { 3241 case BLOCKDEV_QCOW2_VERSION_V2: 3242 version = 2; 3243 break; 3244 case BLOCKDEV_QCOW2_VERSION_V3: 3245 version = 3; 3246 break; 3247 default: 3248 g_assert_not_reached(); 3249 } 3250 } else { 3251 version = 3; 3252 } 3253 3254 if (qcow2_opts->has_cluster_size) { 3255 cluster_size = qcow2_opts->cluster_size; 3256 } else { 3257 cluster_size = DEFAULT_CLUSTER_SIZE; 3258 } 3259 3260 if (!validate_cluster_size(cluster_size, errp)) { 3261 ret = -EINVAL; 3262 goto out; 3263 } 3264 3265 if (!qcow2_opts->has_preallocation) { 3266 qcow2_opts->preallocation = PREALLOC_MODE_OFF; 3267 } 3268 if (qcow2_opts->has_backing_file && 3269 qcow2_opts->preallocation != PREALLOC_MODE_OFF) 3270 { 3271 error_setg(errp, "Backing file and preallocation cannot be used at " 3272 "the same time"); 3273 ret = -EINVAL; 3274 goto out; 3275 } 3276 if (qcow2_opts->has_backing_fmt && !qcow2_opts->has_backing_file) { 3277 error_setg(errp, "Backing format cannot be used without backing file"); 3278 ret = -EINVAL; 3279 goto out; 3280 } 3281 3282 if (!qcow2_opts->has_lazy_refcounts) { 3283 qcow2_opts->lazy_refcounts = false; 3284 } 3285 if (version < 3 && qcow2_opts->lazy_refcounts) { 3286 error_setg(errp, "Lazy refcounts only supported with compatibility " 3287 "level 1.1 and above (use version=v3 or greater)"); 3288 ret = -EINVAL; 3289 goto out; 3290 } 3291 3292 if (!qcow2_opts->has_refcount_bits) { 3293 qcow2_opts->refcount_bits = 16; 3294 } 3295 if (qcow2_opts->refcount_bits > 64 || 3296 
!is_power_of_2(qcow2_opts->refcount_bits)) 3297 { 3298 error_setg(errp, "Refcount width must be a power of two and may not " 3299 "exceed 64 bits"); 3300 ret = -EINVAL; 3301 goto out; 3302 } 3303 if (version < 3 && qcow2_opts->refcount_bits != 16) { 3304 error_setg(errp, "Different refcount widths than 16 bits require " 3305 "compatibility level 1.1 or above (use version=v3 or " 3306 "greater)"); 3307 ret = -EINVAL; 3308 goto out; 3309 } 3310 refcount_order = ctz32(qcow2_opts->refcount_bits); 3311 3312 if (qcow2_opts->data_file_raw && !qcow2_opts->data_file) { 3313 error_setg(errp, "data-file-raw requires data-file"); 3314 ret = -EINVAL; 3315 goto out; 3316 } 3317 if (qcow2_opts->data_file_raw && qcow2_opts->has_backing_file) { 3318 error_setg(errp, "Backing file and data-file-raw cannot be used at " 3319 "the same time"); 3320 ret = -EINVAL; 3321 goto out; 3322 } 3323 3324 if (qcow2_opts->data_file) { 3325 if (version < 3) { 3326 error_setg(errp, "External data files are only supported with " 3327 "compatibility level 1.1 and above (use version=v3 or " 3328 "greater)"); 3329 ret = -EINVAL; 3330 goto out; 3331 } 3332 data_bs = bdrv_open_blockdev_ref(qcow2_opts->data_file, errp); 3333 if (data_bs == NULL) { 3334 ret = -EIO; 3335 goto out; 3336 } 3337 } 3338 3339 /* Create BlockBackend to write to the image */ 3340 blk = blk_new(bdrv_get_aio_context(bs), 3341 BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL); 3342 ret = blk_insert_bs(blk, bs, errp); 3343 if (ret < 0) { 3344 goto out; 3345 } 3346 blk_set_allow_write_beyond_eof(blk, true); 3347 3348 /* Clear the protocol layer and preallocate it if necessary */ 3349 ret = blk_truncate(blk, 0, PREALLOC_MODE_OFF, errp); 3350 if (ret < 0) { 3351 goto out; 3352 } 3353 3354 /* Write the header */ 3355 QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS) < sizeof(*header)); 3356 header = g_malloc0(cluster_size); 3357 *header = (QCowHeader) { 3358 .magic = cpu_to_be32(QCOW_MAGIC), 3359 .version = cpu_to_be32(version), 3360 .cluster_bits = cpu_to_be32(ctz32(cluster_size)), 3361 .size = cpu_to_be64(0), 3362 .l1_table_offset = cpu_to_be64(0), 3363 .l1_size = cpu_to_be32(0), 3364 .refcount_table_offset = cpu_to_be64(cluster_size), 3365 .refcount_table_clusters = cpu_to_be32(1), 3366 .refcount_order = cpu_to_be32(refcount_order), 3367 .header_length = cpu_to_be32(sizeof(*header)), 3368 }; 3369 3370 /* We'll update this to correct value later */ 3371 header->crypt_method = cpu_to_be32(QCOW_CRYPT_NONE); 3372 3373 if (qcow2_opts->lazy_refcounts) { 3374 header->compatible_features |= 3375 cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS); 3376 } 3377 if (data_bs) { 3378 header->incompatible_features |= 3379 cpu_to_be64(QCOW2_INCOMPAT_DATA_FILE); 3380 } 3381 if (qcow2_opts->data_file_raw) { 3382 header->autoclear_features |= 3383 cpu_to_be64(QCOW2_AUTOCLEAR_DATA_FILE_RAW); 3384 } 3385 3386 ret = blk_pwrite(blk, 0, header, cluster_size, 0); 3387 g_free(header); 3388 if (ret < 0) { 3389 error_setg_errno(errp, -ret, "Could not write qcow2 header"); 3390 goto out; 3391 } 3392 3393 /* Write a refcount table with one refcount block */ 3394 refcount_table = g_malloc0(2 * cluster_size); 3395 refcount_table[0] = cpu_to_be64(2 * cluster_size); 3396 ret = blk_pwrite(blk, cluster_size, refcount_table, 2 * cluster_size, 0); 3397 g_free(refcount_table); 3398 3399 if (ret < 0) { 3400 error_setg_errno(errp, -ret, "Could not write refcount table"); 3401 goto out; 3402 } 3403 3404 blk_unref(blk); 3405 blk = NULL; 3406 3407 /* 3408 * And now open the image and make it consistent first (i.e. 
increase the 3409 * refcount of the cluster that is occupied by the header and the refcount 3410 * table) 3411 */ 3412 options = qdict_new(); 3413 qdict_put_str(options, "driver", "qcow2"); 3414 qdict_put_str(options, "file", bs->node_name); 3415 if (data_bs) { 3416 qdict_put_str(options, "data-file", data_bs->node_name); 3417 } 3418 blk = blk_new_open(NULL, NULL, options, 3419 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_NO_FLUSH, 3420 &local_err); 3421 if (blk == NULL) { 3422 error_propagate(errp, local_err); 3423 ret = -EIO; 3424 goto out; 3425 } 3426 3427 ret = qcow2_alloc_clusters(blk_bs(blk), 3 * cluster_size); 3428 if (ret < 0) { 3429 error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 " 3430 "header and refcount table"); 3431 goto out; 3432 3433 } else if (ret != 0) { 3434 error_report("Huh, first cluster in empty image is already in use?"); 3435 abort(); 3436 } 3437 3438 /* Set the external data file if necessary */ 3439 if (data_bs) { 3440 BDRVQcow2State *s = blk_bs(blk)->opaque; 3441 s->image_data_file = g_strdup(data_bs->filename); 3442 } 3443 3444 /* Create a full header (including things like feature table) */ 3445 ret = qcow2_update_header(blk_bs(blk)); 3446 if (ret < 0) { 3447 error_setg_errno(errp, -ret, "Could not update qcow2 header"); 3448 goto out; 3449 } 3450 3451 /* Okay, now that we have a valid image, let's give it the right size */ 3452 ret = blk_truncate(blk, qcow2_opts->size, qcow2_opts->preallocation, errp); 3453 if (ret < 0) { 3454 error_prepend(errp, "Could not resize image: "); 3455 goto out; 3456 } 3457 3458 /* Want a backing file? There you go.*/ 3459 if (qcow2_opts->has_backing_file) { 3460 const char *backing_format = NULL; 3461 3462 if (qcow2_opts->has_backing_fmt) { 3463 backing_format = BlockdevDriver_str(qcow2_opts->backing_fmt); 3464 } 3465 3466 ret = bdrv_change_backing_file(blk_bs(blk), qcow2_opts->backing_file, 3467 backing_format); 3468 if (ret < 0) { 3469 error_setg_errno(errp, -ret, "Could not assign backing file '%s' " 3470 "with format '%s'", qcow2_opts->backing_file, 3471 backing_format); 3472 goto out; 3473 } 3474 } 3475 3476 /* Want encryption? There you go. */ 3477 if (qcow2_opts->has_encrypt) { 3478 ret = qcow2_set_up_encryption(blk_bs(blk), qcow2_opts->encrypt, errp); 3479 if (ret < 0) { 3480 goto out; 3481 } 3482 } 3483 3484 blk_unref(blk); 3485 blk = NULL; 3486 3487 /* Reopen the image without BDRV_O_NO_FLUSH to flush it before returning. 3488 * Using BDRV_O_NO_IO, since encryption is now setup we don't want to 3489 * have to setup decryption context. We're not doing any I/O on the top 3490 * level BlockDriverState, only lower layers, where BDRV_O_NO_IO does 3491 * not have effect. 
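* BDRV_O_NO_BACKING likewise avoids opening the backing chain again just for this final flush.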
3492 */ 3493 options = qdict_new(); 3494 qdict_put_str(options, "driver", "qcow2"); 3495 qdict_put_str(options, "file", bs->node_name); 3496 if (data_bs) { 3497 qdict_put_str(options, "data-file", data_bs->node_name); 3498 } 3499 blk = blk_new_open(NULL, NULL, options, 3500 BDRV_O_RDWR | BDRV_O_NO_BACKING | BDRV_O_NO_IO, 3501 &local_err); 3502 if (blk == NULL) { 3503 error_propagate(errp, local_err); 3504 ret = -EIO; 3505 goto out; 3506 } 3507 3508 ret = 0; 3509 out: 3510 blk_unref(blk); 3511 bdrv_unref(bs); 3512 bdrv_unref(data_bs); 3513 return ret; 3514 } 3515 3516 static int coroutine_fn qcow2_co_create_opts(const char *filename, QemuOpts *opts, 3517 Error **errp) 3518 { 3519 BlockdevCreateOptions *create_options = NULL; 3520 QDict *qdict; 3521 Visitor *v; 3522 BlockDriverState *bs = NULL; 3523 BlockDriverState *data_bs = NULL; 3524 Error *local_err = NULL; 3525 const char *val; 3526 int ret; 3527 3528 /* Only the keyval visitor supports the dotted syntax needed for 3529 * encryption, so go through a QDict before getting a QAPI type. Ignore 3530 * options meant for the protocol layer so that the visitor doesn't 3531 * complain. */ 3532 qdict = qemu_opts_to_qdict_filtered(opts, NULL, bdrv_qcow2.create_opts, 3533 true); 3534 3535 /* Handle encryption options */ 3536 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT); 3537 if (val && !strcmp(val, "on")) { 3538 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT, "qcow"); 3539 } else if (val && !strcmp(val, "off")) { 3540 qdict_del(qdict, BLOCK_OPT_ENCRYPT); 3541 } 3542 3543 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT); 3544 if (val && !strcmp(val, "aes")) { 3545 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT, "qcow"); 3546 } 3547 3548 /* Convert compat=0.10/1.1 into compat=v2/v3, to be renamed into 3549 * version=v2/v3 below. 
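* (the actual renaming is done via the opt_renames table and qdict_rename_keys() further down)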
*/ 3550 val = qdict_get_try_str(qdict, BLOCK_OPT_COMPAT_LEVEL); 3551 if (val && !strcmp(val, "0.10")) { 3552 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v2"); 3553 } else if (val && !strcmp(val, "1.1")) { 3554 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v3"); 3555 } 3556 3557 /* Change legacy command line options into QMP ones */ 3558 static const QDictRenames opt_renames[] = { 3559 { BLOCK_OPT_BACKING_FILE, "backing-file" }, 3560 { BLOCK_OPT_BACKING_FMT, "backing-fmt" }, 3561 { BLOCK_OPT_CLUSTER_SIZE, "cluster-size" }, 3562 { BLOCK_OPT_LAZY_REFCOUNTS, "lazy-refcounts" }, 3563 { BLOCK_OPT_REFCOUNT_BITS, "refcount-bits" }, 3564 { BLOCK_OPT_ENCRYPT, BLOCK_OPT_ENCRYPT_FORMAT }, 3565 { BLOCK_OPT_COMPAT_LEVEL, "version" }, 3566 { BLOCK_OPT_DATA_FILE_RAW, "data-file-raw" }, 3567 { NULL, NULL }, 3568 }; 3569 3570 if (!qdict_rename_keys(qdict, opt_renames, errp)) { 3571 ret = -EINVAL; 3572 goto finish; 3573 } 3574 3575 /* Create and open the file (protocol layer) */ 3576 ret = bdrv_create_file(filename, opts, errp); 3577 if (ret < 0) { 3578 goto finish; 3579 } 3580 3581 bs = bdrv_open(filename, NULL, NULL, 3582 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp); 3583 if (bs == NULL) { 3584 ret = -EIO; 3585 goto finish; 3586 } 3587 3588 /* Create and open an external data file (protocol layer) */ 3589 val = qdict_get_try_str(qdict, BLOCK_OPT_DATA_FILE); 3590 if (val) { 3591 ret = bdrv_create_file(val, opts, errp); 3592 if (ret < 0) { 3593 goto finish; 3594 } 3595 3596 data_bs = bdrv_open(val, NULL, NULL, 3597 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, 3598 errp); 3599 if (data_bs == NULL) { 3600 ret = -EIO; 3601 goto finish; 3602 } 3603 3604 qdict_del(qdict, BLOCK_OPT_DATA_FILE); 3605 qdict_put_str(qdict, "data-file", data_bs->node_name); 3606 } 3607 3608 /* Set 'driver' and 'node' options */ 3609 qdict_put_str(qdict, "driver", "qcow2"); 3610 qdict_put_str(qdict, "file", bs->node_name); 3611 3612 /* Now get the QAPI type BlockdevCreateOptions */ 3613 v = qobject_input_visitor_new_flat_confused(qdict, errp); 3614 if (!v) { 3615 ret = -EINVAL; 3616 goto finish; 3617 } 3618 3619 visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err); 3620 visit_free(v); 3621 3622 if (local_err) { 3623 error_propagate(errp, local_err); 3624 ret = -EINVAL; 3625 goto finish; 3626 } 3627 3628 /* Silently round up size */ 3629 create_options->u.qcow2.size = ROUND_UP(create_options->u.qcow2.size, 3630 BDRV_SECTOR_SIZE); 3631 3632 /* Create the qcow2 image (format layer) */ 3633 ret = qcow2_co_create(create_options, errp); 3634 if (ret < 0) { 3635 goto finish; 3636 } 3637 3638 ret = 0; 3639 finish: 3640 qobject_unref(qdict); 3641 bdrv_unref(bs); 3642 bdrv_unref(data_bs); 3643 qapi_free_BlockdevCreateOptions(create_options); 3644 return ret; 3645 } 3646 3647 3648 static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes) 3649 { 3650 int64_t nr; 3651 int res; 3652 3653 /* Clamp to image length, before checking status of underlying sectors */ 3654 if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) { 3655 bytes = bs->total_sectors * BDRV_SECTOR_SIZE - offset; 3656 } 3657 3658 if (!bytes) { 3659 return true; 3660 } 3661 res = bdrv_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL); 3662 return res >= 0 && (res & BDRV_BLOCK_ZERO) && nr == bytes; 3663 } 3664 3665 static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs, 3666 int64_t offset, int bytes, BdrvRequestFlags flags) 3667 { 3668 int ret; 3669 BDRVQcow2State *s = bs->opaque; 3670 3671 uint32_t head = offset 
% s->cluster_size; 3672 uint32_t tail = (offset + bytes) % s->cluster_size; 3673 3674 trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, bytes); 3675 if (offset + bytes == bs->total_sectors * BDRV_SECTOR_SIZE) { 3676 tail = 0; 3677 } 3678 3679 if (head || tail) { 3680 uint64_t off; 3681 unsigned int nr; 3682 3683 assert(head + bytes <= s->cluster_size); 3684 3685 /* check whether remainder of cluster already reads as zero */ 3686 if (!(is_zero(bs, offset - head, head) && 3687 is_zero(bs, offset + bytes, 3688 tail ? s->cluster_size - tail : 0))) { 3689 return -ENOTSUP; 3690 } 3691 3692 qemu_co_mutex_lock(&s->lock); 3693 /* We can have new write after previous check */ 3694 offset = QEMU_ALIGN_DOWN(offset, s->cluster_size); 3695 bytes = s->cluster_size; 3696 nr = s->cluster_size; 3697 ret = qcow2_get_cluster_offset(bs, offset, &nr, &off); 3698 if (ret != QCOW2_CLUSTER_UNALLOCATED && 3699 ret != QCOW2_CLUSTER_ZERO_PLAIN && 3700 ret != QCOW2_CLUSTER_ZERO_ALLOC) { 3701 qemu_co_mutex_unlock(&s->lock); 3702 return -ENOTSUP; 3703 } 3704 } else { 3705 qemu_co_mutex_lock(&s->lock); 3706 } 3707 3708 trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, bytes); 3709 3710 /* Whatever is left can use real zero clusters */ 3711 ret = qcow2_cluster_zeroize(bs, offset, bytes, flags); 3712 qemu_co_mutex_unlock(&s->lock); 3713 3714 return ret; 3715 } 3716 3717 static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs, 3718 int64_t offset, int bytes) 3719 { 3720 int ret; 3721 BDRVQcow2State *s = bs->opaque; 3722 3723 if (!QEMU_IS_ALIGNED(offset | bytes, s->cluster_size)) { 3724 assert(bytes < s->cluster_size); 3725 /* Ignore partial clusters, except for the special case of the 3726 * complete partial cluster at the end of an unaligned file */ 3727 if (!QEMU_IS_ALIGNED(offset, s->cluster_size) || 3728 offset + bytes != bs->total_sectors * BDRV_SECTOR_SIZE) { 3729 return -ENOTSUP; 3730 } 3731 } 3732 3733 qemu_co_mutex_lock(&s->lock); 3734 ret = qcow2_cluster_discard(bs, offset, bytes, QCOW2_DISCARD_REQUEST, 3735 false); 3736 qemu_co_mutex_unlock(&s->lock); 3737 return ret; 3738 } 3739 3740 static int coroutine_fn 3741 qcow2_co_copy_range_from(BlockDriverState *bs, 3742 BdrvChild *src, uint64_t src_offset, 3743 BdrvChild *dst, uint64_t dst_offset, 3744 uint64_t bytes, BdrvRequestFlags read_flags, 3745 BdrvRequestFlags write_flags) 3746 { 3747 BDRVQcow2State *s = bs->opaque; 3748 int ret; 3749 unsigned int cur_bytes; /* number of bytes in current iteration */ 3750 BdrvChild *child = NULL; 3751 BdrvRequestFlags cur_write_flags; 3752 3753 assert(!bs->encrypted); 3754 qemu_co_mutex_lock(&s->lock); 3755 3756 while (bytes != 0) { 3757 uint64_t copy_offset = 0; 3758 /* prepare next request */ 3759 cur_bytes = MIN(bytes, INT_MAX); 3760 cur_write_flags = write_flags; 3761 3762 ret = qcow2_get_cluster_offset(bs, src_offset, &cur_bytes, ©_offset); 3763 if (ret < 0) { 3764 goto out; 3765 } 3766 3767 switch (ret) { 3768 case QCOW2_CLUSTER_UNALLOCATED: 3769 if (bs->backing && bs->backing->bs) { 3770 int64_t backing_length = bdrv_getlength(bs->backing->bs); 3771 if (src_offset >= backing_length) { 3772 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 3773 } else { 3774 child = bs->backing; 3775 cur_bytes = MIN(cur_bytes, backing_length - src_offset); 3776 copy_offset = src_offset; 3777 } 3778 } else { 3779 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 3780 } 3781 break; 3782 3783 case QCOW2_CLUSTER_ZERO_PLAIN: 3784 case QCOW2_CLUSTER_ZERO_ALLOC: 3785 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 3786 break; 3787 3788 case 
QCOW2_CLUSTER_COMPRESSED: 3789 ret = -ENOTSUP; 3790 goto out; 3791 3792 case QCOW2_CLUSTER_NORMAL: 3793 child = s->data_file; 3794 copy_offset += offset_into_cluster(s, src_offset); 3795 if ((copy_offset & 511) != 0) { 3796 ret = -EIO; 3797 goto out; 3798 } 3799 break; 3800 3801 default: 3802 abort(); 3803 } 3804 qemu_co_mutex_unlock(&s->lock); 3805 ret = bdrv_co_copy_range_from(child, 3806 copy_offset, 3807 dst, dst_offset, 3808 cur_bytes, read_flags, cur_write_flags); 3809 qemu_co_mutex_lock(&s->lock); 3810 if (ret < 0) { 3811 goto out; 3812 } 3813 3814 bytes -= cur_bytes; 3815 src_offset += cur_bytes; 3816 dst_offset += cur_bytes; 3817 } 3818 ret = 0; 3819 3820 out: 3821 qemu_co_mutex_unlock(&s->lock); 3822 return ret; 3823 } 3824 3825 static int coroutine_fn 3826 qcow2_co_copy_range_to(BlockDriverState *bs, 3827 BdrvChild *src, uint64_t src_offset, 3828 BdrvChild *dst, uint64_t dst_offset, 3829 uint64_t bytes, BdrvRequestFlags read_flags, 3830 BdrvRequestFlags write_flags) 3831 { 3832 BDRVQcow2State *s = bs->opaque; 3833 int offset_in_cluster; 3834 int ret; 3835 unsigned int cur_bytes; /* number of bytes in current iteration */ 3836 uint64_t cluster_offset; 3837 QCowL2Meta *l2meta = NULL; 3838 3839 assert(!bs->encrypted); 3840 3841 qemu_co_mutex_lock(&s->lock); 3842 3843 while (bytes != 0) { 3844 3845 l2meta = NULL; 3846 3847 offset_in_cluster = offset_into_cluster(s, dst_offset); 3848 cur_bytes = MIN(bytes, INT_MAX); 3849 3850 /* TODO: 3851 * If src->bs == dst->bs, we could simply copy by incrementing 3852 * the refcnt, without copying user data. 3853 * Or if src->bs == dst->bs->backing->bs, we could copy by discarding. */ 3854 ret = qcow2_alloc_cluster_offset(bs, dst_offset, &cur_bytes, 3855 &cluster_offset, &l2meta); 3856 if (ret < 0) { 3857 goto fail; 3858 } 3859 3860 assert((cluster_offset & 511) == 0); 3861 3862 ret = qcow2_pre_write_overlap_check(bs, 0, 3863 cluster_offset + offset_in_cluster, cur_bytes, true); 3864 if (ret < 0) { 3865 goto fail; 3866 } 3867 3868 qemu_co_mutex_unlock(&s->lock); 3869 ret = bdrv_co_copy_range_to(src, src_offset, 3870 s->data_file, 3871 cluster_offset + offset_in_cluster, 3872 cur_bytes, read_flags, write_flags); 3873 qemu_co_mutex_lock(&s->lock); 3874 if (ret < 0) { 3875 goto fail; 3876 } 3877 3878 ret = qcow2_handle_l2meta(bs, &l2meta, true); 3879 if (ret) { 3880 goto fail; 3881 } 3882 3883 bytes -= cur_bytes; 3884 src_offset += cur_bytes; 3885 dst_offset += cur_bytes; 3886 } 3887 ret = 0; 3888 3889 fail: 3890 qcow2_handle_l2meta(bs, &l2meta, false); 3891 3892 qemu_co_mutex_unlock(&s->lock); 3893 3894 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); 3895 3896 return ret; 3897 } 3898 3899 static int coroutine_fn qcow2_co_truncate(BlockDriverState *bs, int64_t offset, 3900 PreallocMode prealloc, Error **errp) 3901 { 3902 BDRVQcow2State *s = bs->opaque; 3903 uint64_t old_length; 3904 int64_t new_l1_size; 3905 int ret; 3906 QDict *options; 3907 3908 if (prealloc != PREALLOC_MODE_OFF && prealloc != PREALLOC_MODE_METADATA && 3909 prealloc != PREALLOC_MODE_FALLOC && prealloc != PREALLOC_MODE_FULL) 3910 { 3911 error_setg(errp, "Unsupported preallocation mode '%s'", 3912 PreallocMode_str(prealloc)); 3913 return -ENOTSUP; 3914 } 3915 3916 if (offset & 511) { 3917 error_setg(errp, "The new size must be a multiple of 512"); 3918 return -EINVAL; 3919 } 3920 3921 qemu_co_mutex_lock(&s->lock); 3922 3923 /* cannot proceed if image has snapshots */ 3924 if (s->nb_snapshots) { 3925 error_setg(errp, "Can't resize an image which has snapshots"); 3926 ret =
-ENOTSUP; 3927 goto fail; 3928 } 3929 3930 /* cannot proceed if image has bitmaps */ 3931 if (qcow2_truncate_bitmaps_check(bs, errp)) { 3932 ret = -ENOTSUP; 3933 goto fail; 3934 } 3935 3936 old_length = bs->total_sectors * BDRV_SECTOR_SIZE; 3937 new_l1_size = size_to_l1(s, offset); 3938 3939 if (offset < old_length) { 3940 int64_t last_cluster, old_file_size; 3941 if (prealloc != PREALLOC_MODE_OFF) { 3942 error_setg(errp, 3943 "Preallocation can't be used for shrinking an image"); 3944 ret = -EINVAL; 3945 goto fail; 3946 } 3947 3948 ret = qcow2_cluster_discard(bs, ROUND_UP(offset, s->cluster_size), 3949 old_length - ROUND_UP(offset, 3950 s->cluster_size), 3951 QCOW2_DISCARD_ALWAYS, true); 3952 if (ret < 0) { 3953 error_setg_errno(errp, -ret, "Failed to discard cropped clusters"); 3954 goto fail; 3955 } 3956 3957 ret = qcow2_shrink_l1_table(bs, new_l1_size); 3958 if (ret < 0) { 3959 error_setg_errno(errp, -ret, 3960 "Failed to reduce the number of L2 tables"); 3961 goto fail; 3962 } 3963 3964 ret = qcow2_shrink_reftable(bs); 3965 if (ret < 0) { 3966 error_setg_errno(errp, -ret, 3967 "Failed to discard unused refblocks"); 3968 goto fail; 3969 } 3970 3971 old_file_size = bdrv_getlength(bs->file->bs); 3972 if (old_file_size < 0) { 3973 error_setg_errno(errp, -old_file_size, 3974 "Failed to inquire current file length"); 3975 ret = old_file_size; 3976 goto fail; 3977 } 3978 last_cluster = qcow2_get_last_cluster(bs, old_file_size); 3979 if (last_cluster < 0) { 3980 error_setg_errno(errp, -last_cluster, 3981 "Failed to find the last cluster"); 3982 ret = last_cluster; 3983 goto fail; 3984 } 3985 if ((last_cluster + 1) * s->cluster_size < old_file_size) { 3986 Error *local_err = NULL; 3987 3988 bdrv_co_truncate(bs->file, (last_cluster + 1) * s->cluster_size, 3989 PREALLOC_MODE_OFF, &local_err); 3990 if (local_err) { 3991 warn_reportf_err(local_err, 3992 "Failed to truncate the tail of the image: "); 3993 } 3994 } 3995 } else { 3996 ret = qcow2_grow_l1_table(bs, new_l1_size, true); 3997 if (ret < 0) { 3998 error_setg_errno(errp, -ret, "Failed to grow the L1 table"); 3999 goto fail; 4000 } 4001 } 4002 4003 switch (prealloc) { 4004 case PREALLOC_MODE_OFF: 4005 if (has_data_file(bs)) { 4006 ret = bdrv_co_truncate(s->data_file, offset, prealloc, errp); 4007 if (ret < 0) { 4008 goto fail; 4009 } 4010 } 4011 break; 4012 4013 case PREALLOC_MODE_METADATA: 4014 ret = preallocate_co(bs, old_length, offset, prealloc, errp); 4015 if (ret < 0) { 4016 goto fail; 4017 } 4018 break; 4019 4020 case PREALLOC_MODE_FALLOC: 4021 case PREALLOC_MODE_FULL: 4022 { 4023 int64_t allocation_start, host_offset, guest_offset; 4024 int64_t clusters_allocated; 4025 int64_t old_file_size, new_file_size; 4026 uint64_t nb_new_data_clusters, nb_new_l2_tables; 4027 4028 /* With a data file, preallocation means just allocating the metadata 4029 * and forwarding the truncate request to the data file */ 4030 if (has_data_file(bs)) { 4031 ret = preallocate_co(bs, old_length, offset, prealloc, errp); 4032 if (ret < 0) { 4033 goto fail; 4034 } 4035 break; 4036 } 4037 4038 old_file_size = bdrv_getlength(bs->file->bs); 4039 if (old_file_size < 0) { 4040 error_setg_errno(errp, -old_file_size, 4041 "Failed to inquire current file length"); 4042 ret = old_file_size; 4043 goto fail; 4044 } 4045 old_file_size = ROUND_UP(old_file_size, s->cluster_size); 4046 4047 nb_new_data_clusters = DIV_ROUND_UP(offset - old_length, 4048 s->cluster_size); 4049 4050 /* This is an overestimation; we will not actually allocate space for 4051 * these in the file but 
just make sure the new refcount structures are 4052 * able to cover them so we will not have to allocate new refblocks 4053 * while entering the data blocks in the potentially new L2 tables. 4054 * (We do not actually care where the L2 tables are placed. Maybe they 4055 * are already allocated or they can be placed somewhere before 4056 * @old_file_size. It does not matter because they will be fully 4057 * allocated automatically, so they do not need to be covered by the 4058 * preallocation. All that matters is that we will not have to allocate 4059 * new refcount structures for them.) */ 4060 nb_new_l2_tables = DIV_ROUND_UP(nb_new_data_clusters, 4061 s->cluster_size / sizeof(uint64_t)); 4062 /* The cluster range may not be aligned to L2 boundaries, so add one L2 4063 * table for a potential head/tail */ 4064 nb_new_l2_tables++; 4065 4066 allocation_start = qcow2_refcount_area(bs, old_file_size, 4067 nb_new_data_clusters + 4068 nb_new_l2_tables, 4069 true, 0, 0); 4070 if (allocation_start < 0) { 4071 error_setg_errno(errp, -allocation_start, 4072 "Failed to resize refcount structures"); 4073 ret = allocation_start; 4074 goto fail; 4075 } 4076 4077 clusters_allocated = qcow2_alloc_clusters_at(bs, allocation_start, 4078 nb_new_data_clusters); 4079 if (clusters_allocated < 0) { 4080 error_setg_errno(errp, -clusters_allocated, 4081 "Failed to allocate data clusters"); 4082 ret = clusters_allocated; 4083 goto fail; 4084 } 4085 4086 assert(clusters_allocated == nb_new_data_clusters); 4087 4088 /* Allocate the data area */ 4089 new_file_size = allocation_start + 4090 nb_new_data_clusters * s->cluster_size; 4091 ret = bdrv_co_truncate(bs->file, new_file_size, prealloc, errp); 4092 if (ret < 0) { 4093 error_prepend(errp, "Failed to resize underlying file: "); 4094 qcow2_free_clusters(bs, allocation_start, 4095 nb_new_data_clusters * s->cluster_size, 4096 QCOW2_DISCARD_OTHER); 4097 goto fail; 4098 } 4099 4100 /* Create the necessary L2 entries */ 4101 host_offset = allocation_start; 4102 guest_offset = old_length; 4103 while (nb_new_data_clusters) { 4104 int64_t nb_clusters = MIN( 4105 nb_new_data_clusters, 4106 s->l2_slice_size - offset_to_l2_slice_index(s, guest_offset)); 4107 QCowL2Meta allocation = { 4108 .offset = guest_offset, 4109 .alloc_offset = host_offset, 4110 .nb_clusters = nb_clusters, 4111 }; 4112 qemu_co_queue_init(&allocation.dependent_requests); 4113 4114 ret = qcow2_alloc_cluster_link_l2(bs, &allocation); 4115 if (ret < 0) { 4116 error_setg_errno(errp, -ret, "Failed to update L2 tables"); 4117 qcow2_free_clusters(bs, host_offset, 4118 nb_new_data_clusters * s->cluster_size, 4119 QCOW2_DISCARD_OTHER); 4120 goto fail; 4121 } 4122 4123 guest_offset += nb_clusters * s->cluster_size; 4124 host_offset += nb_clusters * s->cluster_size; 4125 nb_new_data_clusters -= nb_clusters; 4126 } 4127 break; 4128 } 4129 4130 default: 4131 g_assert_not_reached(); 4132 } 4133 4134 if (prealloc != PREALLOC_MODE_OFF) { 4135 /* Flush metadata before actually changing the image size */ 4136 ret = qcow2_write_caches(bs); 4137 if (ret < 0) { 4138 error_setg_errno(errp, -ret, 4139 "Failed to flush the preallocated area to disk"); 4140 goto fail; 4141 } 4142 } 4143 4144 bs->total_sectors = offset / BDRV_SECTOR_SIZE; 4145 4146 /* write updated header.size */ 4147 offset = cpu_to_be64(offset); 4148 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size), 4149 &offset, sizeof(uint64_t)); 4150 if (ret < 0) { 4151 error_setg_errno(errp, -ret, "Failed to update the image size"); 4152 goto fail; 4153 } 4154 4155 
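    /*
     * VM state for internal snapshots is stored past the end of the
     * guest-visible disk, at qcow2_vm_state_offset(), which is derived from
     * l1_vm_state_index (see qcow2_save_vmstate()/qcow2_load_vmstate()
     * below), so the index has to follow the L1 size of the resized image.
     */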
s->l1_vm_state_index = new_l1_size; 4156 4157 /* Update cache sizes */ 4158 options = qdict_clone_shallow(bs->options); 4159 ret = qcow2_update_options(bs, options, s->flags, errp); 4160 qobject_unref(options); 4161 if (ret < 0) { 4162 goto fail; 4163 } 4164 ret = 0; 4165 fail: 4166 qemu_co_mutex_unlock(&s->lock); 4167 return ret; 4168 } 4169 4170 /* XXX: put compressed sectors first, then all the cluster aligned 4171 tables to avoid losing bytes in alignment */ 4172 static coroutine_fn int 4173 qcow2_co_pwritev_compressed_part(BlockDriverState *bs, 4174 uint64_t offset, uint64_t bytes, 4175 QEMUIOVector *qiov, size_t qiov_offset) 4176 { 4177 BDRVQcow2State *s = bs->opaque; 4178 int ret; 4179 ssize_t out_len; 4180 uint8_t *buf, *out_buf; 4181 uint64_t cluster_offset; 4182 4183 if (has_data_file(bs)) { 4184 return -ENOTSUP; 4185 } 4186 4187 if (bytes == 0) { 4188 /* align end of file to a sector boundary to ease reading with 4189 sector based I/Os */ 4190 int64_t len = bdrv_getlength(bs->file->bs); 4191 if (len < 0) { 4192 return len; 4193 } 4194 return bdrv_co_truncate(bs->file, len, PREALLOC_MODE_OFF, NULL); 4195 } 4196 4197 if (offset_into_cluster(s, offset)) { 4198 return -EINVAL; 4199 } 4200 4201 buf = qemu_blockalign(bs, s->cluster_size); 4202 if (bytes != s->cluster_size) { 4203 if (bytes > s->cluster_size || 4204 offset + bytes != bs->total_sectors << BDRV_SECTOR_BITS) 4205 { 4206 qemu_vfree(buf); 4207 return -EINVAL; 4208 } 4209 /* Zero-pad last write if image size is not cluster aligned */ 4210 memset(buf + bytes, 0, s->cluster_size - bytes); 4211 } 4212 qemu_iovec_to_buf(qiov, qiov_offset, buf, bytes); 4213 4214 out_buf = g_malloc(s->cluster_size); 4215 4216 out_len = qcow2_co_compress(bs, out_buf, s->cluster_size - 1, 4217 buf, s->cluster_size); 4218 if (out_len == -ENOMEM) { 4219 /* could not compress: write normal cluster */ 4220 ret = qcow2_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset, 0); 4221 if (ret < 0) { 4222 goto fail; 4223 } 4224 goto success; 4225 } else if (out_len < 0) { 4226 ret = -EINVAL; 4227 goto fail; 4228 } 4229 4230 qemu_co_mutex_lock(&s->lock); 4231 ret = qcow2_alloc_compressed_cluster_offset(bs, offset, out_len, 4232 &cluster_offset); 4233 if (ret < 0) { 4234 qemu_co_mutex_unlock(&s->lock); 4235 goto fail; 4236 } 4237 4238 ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset, out_len, true); 4239 qemu_co_mutex_unlock(&s->lock); 4240 if (ret < 0) { 4241 goto fail; 4242 } 4243 4244 BLKDBG_EVENT(s->data_file, BLKDBG_WRITE_COMPRESSED); 4245 ret = bdrv_co_pwrite(s->data_file, cluster_offset, out_len, out_buf, 0); 4246 if (ret < 0) { 4247 goto fail; 4248 } 4249 success: 4250 ret = 0; 4251 fail: 4252 qemu_vfree(buf); 4253 g_free(out_buf); 4254 return ret; 4255 } 4256 4257 static int coroutine_fn 4258 qcow2_co_preadv_compressed(BlockDriverState *bs, 4259 uint64_t file_cluster_offset, 4260 uint64_t offset, 4261 uint64_t bytes, 4262 QEMUIOVector *qiov, 4263 size_t qiov_offset) 4264 { 4265 BDRVQcow2State *s = bs->opaque; 4266 int ret = 0, csize, nb_csectors; 4267 uint64_t coffset; 4268 uint8_t *buf, *out_buf; 4269 int offset_in_cluster = offset_into_cluster(s, offset); 4270 4271 coffset = file_cluster_offset & s->cluster_offset_mask; 4272 nb_csectors = ((file_cluster_offset >> s->csize_shift) & s->csize_mask) + 1; 4273 csize = nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE - 4274 (coffset & ~QCOW2_COMPRESSED_SECTOR_MASK); 4275 4276 buf = g_try_malloc(csize); 4277 if (!buf) { 4278 return -ENOMEM; 4279 } 4280 4281 out_buf = qemu_blockalign(bs, s->cluster_size); 
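    /*
     * Illustrative example (values invented for this comment): with 512-byte
     * compressed sectors, an L2 entry that decodes to coffset == 0x50300 and
     * a stored sector-count field of 2 yields nb_csectors == 3 and
     * csize == 3 * 512 - (0x50300 & 511) == 1280, i.e. 1280 bytes are read
     * from host offset 0x50300 and handed to the decompressor below.
     */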
4282 4283 BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED); 4284 ret = bdrv_co_pread(bs->file, coffset, csize, buf, 0); 4285 if (ret < 0) { 4286 goto fail; 4287 } 4288 4289 if (qcow2_co_decompress(bs, out_buf, s->cluster_size, buf, csize) < 0) { 4290 ret = -EIO; 4291 goto fail; 4292 } 4293 4294 qemu_iovec_from_buf(qiov, qiov_offset, out_buf + offset_in_cluster, bytes); 4295 4296 fail: 4297 qemu_vfree(out_buf); 4298 g_free(buf); 4299 4300 return ret; 4301 } 4302 4303 static int make_completely_empty(BlockDriverState *bs) 4304 { 4305 BDRVQcow2State *s = bs->opaque; 4306 Error *local_err = NULL; 4307 int ret, l1_clusters; 4308 int64_t offset; 4309 uint64_t *new_reftable = NULL; 4310 uint64_t rt_entry, l1_size2; 4311 struct { 4312 uint64_t l1_offset; 4313 uint64_t reftable_offset; 4314 uint32_t reftable_clusters; 4315 } QEMU_PACKED l1_ofs_rt_ofs_cls; 4316 4317 ret = qcow2_cache_empty(bs, s->l2_table_cache); 4318 if (ret < 0) { 4319 goto fail; 4320 } 4321 4322 ret = qcow2_cache_empty(bs, s->refcount_block_cache); 4323 if (ret < 0) { 4324 goto fail; 4325 } 4326 4327 /* Refcounts will be broken utterly */ 4328 ret = qcow2_mark_dirty(bs); 4329 if (ret < 0) { 4330 goto fail; 4331 } 4332 4333 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 4334 4335 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t)); 4336 l1_size2 = (uint64_t)s->l1_size * sizeof(uint64_t); 4337 4338 /* After this call, neither the in-memory nor the on-disk refcount 4339 * information accurately describe the actual references */ 4340 4341 ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset, 4342 l1_clusters * s->cluster_size, 0); 4343 if (ret < 0) { 4344 goto fail_broken_refcounts; 4345 } 4346 memset(s->l1_table, 0, l1_size2); 4347 4348 BLKDBG_EVENT(bs->file, BLKDBG_EMPTY_IMAGE_PREPARE); 4349 4350 /* Overwrite enough clusters at the beginning of the sectors to place 4351 * the refcount table, a refcount block and the L1 table in; this may 4352 * overwrite parts of the existing refcount and L1 table, which is not 4353 * an issue because the dirty flag is set, complete data loss is in fact 4354 * desired and partial data loss is consequently fine as well */ 4355 ret = bdrv_pwrite_zeroes(bs->file, s->cluster_size, 4356 (2 + l1_clusters) * s->cluster_size, 0); 4357 /* This call (even if it failed overall) may have overwritten on-disk 4358 * refcount structures; in that case, the in-memory refcount information 4359 * will probably differ from the on-disk information which makes the BDS 4360 * unusable */ 4361 if (ret < 0) { 4362 goto fail_broken_refcounts; 4363 } 4364 4365 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 4366 BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE); 4367 4368 /* "Create" an empty reftable (one cluster) directly after the image 4369 * header and an empty L1 table three clusters after the image header; 4370 * the cluster between those two will be used as the first refblock */ 4371 l1_ofs_rt_ofs_cls.l1_offset = cpu_to_be64(3 * s->cluster_size); 4372 l1_ofs_rt_ofs_cls.reftable_offset = cpu_to_be64(s->cluster_size); 4373 l1_ofs_rt_ofs_cls.reftable_clusters = cpu_to_be32(1); 4374 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_table_offset), 4375 &l1_ofs_rt_ofs_cls, sizeof(l1_ofs_rt_ofs_cls)); 4376 if (ret < 0) { 4377 goto fail_broken_refcounts; 4378 } 4379 4380 s->l1_table_offset = 3 * s->cluster_size; 4381 4382 new_reftable = g_try_new0(uint64_t, s->cluster_size / sizeof(uint64_t)); 4383 if (!new_reftable) { 4384 ret = -ENOMEM; 4385 goto fail_broken_refcounts; 4386 } 4387 4388 
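    /*
     * The minimal image layout being set up here is, in cluster units:
     *   cluster 0                           image header
     *   cluster 1                           refcount table (single entry used)
     *   cluster 2                           first refcount block
     *   clusters 3 ... 3 + l1_clusters - 1  L1 table
     * The in-memory copies of these offsets are updated below to match the
     * header fields written above.
     */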
s->refcount_table_offset = s->cluster_size; 4389 s->refcount_table_size = s->cluster_size / sizeof(uint64_t); 4390 s->max_refcount_table_index = 0; 4391 4392 g_free(s->refcount_table); 4393 s->refcount_table = new_reftable; 4394 new_reftable = NULL; 4395 4396 /* Now the in-memory refcount information again corresponds to the on-disk 4397 * information (reftable is empty and no refblocks (the refblock cache is 4398 * empty)); however, this means some clusters (e.g. the image header) are 4399 * referenced, but not refcounted, but the normal qcow2 code assumes that 4400 * the in-memory information is always correct */ 4401 4402 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC); 4403 4404 /* Enter the first refblock into the reftable */ 4405 rt_entry = cpu_to_be64(2 * s->cluster_size); 4406 ret = bdrv_pwrite_sync(bs->file, s->cluster_size, 4407 &rt_entry, sizeof(rt_entry)); 4408 if (ret < 0) { 4409 goto fail_broken_refcounts; 4410 } 4411 s->refcount_table[0] = 2 * s->cluster_size; 4412 4413 s->free_cluster_index = 0; 4414 assert(3 + l1_clusters <= s->refcount_block_size); 4415 offset = qcow2_alloc_clusters(bs, 3 * s->cluster_size + l1_size2); 4416 if (offset < 0) { 4417 ret = offset; 4418 goto fail_broken_refcounts; 4419 } else if (offset > 0) { 4420 error_report("First cluster in emptied image is in use"); 4421 abort(); 4422 } 4423 4424 /* Now finally the in-memory information corresponds to the on-disk 4425 * structures and is correct */ 4426 ret = qcow2_mark_clean(bs); 4427 if (ret < 0) { 4428 goto fail; 4429 } 4430 4431 ret = bdrv_truncate(bs->file, (3 + l1_clusters) * s->cluster_size, 4432 PREALLOC_MODE_OFF, &local_err); 4433 if (ret < 0) { 4434 error_report_err(local_err); 4435 goto fail; 4436 } 4437 4438 return 0; 4439 4440 fail_broken_refcounts: 4441 /* The BDS is unusable at this point. If we wanted to make it usable, we 4442 * would have to call qcow2_refcount_close(), qcow2_refcount_init(), 4443 * qcow2_check_refcounts(), qcow2_refcount_close() and qcow2_refcount_init() 4444 * again. However, because the functions which could have caused this error 4445 * path to be taken are used by those functions as well, it's very likely 4446 * that that sequence will fail as well. Therefore, just eject the BDS. */ 4447 bs->drv = NULL; 4448 4449 fail: 4450 g_free(new_reftable); 4451 return ret; 4452 } 4453 4454 static int qcow2_make_empty(BlockDriverState *bs) 4455 { 4456 BDRVQcow2State *s = bs->opaque; 4457 uint64_t offset, end_offset; 4458 int step = QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size); 4459 int l1_clusters, ret = 0; 4460 4461 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t)); 4462 4463 if (s->qcow_version >= 3 && !s->snapshots && !s->nb_bitmaps && 4464 3 + l1_clusters <= s->refcount_block_size && 4465 s->crypt_method_header != QCOW_CRYPT_LUKS && 4466 !has_data_file(bs)) { 4467 /* The following function only works for qcow2 v3 images (it 4468 * requires the dirty flag) and only as long as there are no 4469 * features that reserve extra clusters (such as snapshots, 4470 * LUKS header, or persistent bitmaps), because it completely 4471 * empties the image. Furthermore, the L1 table and three 4472 * additional clusters (image header, refcount table, one 4473 * refcount block) have to fit inside one refcount block. It 4474 * only resets the image file, i.e. does not work with an 4475 * external data file. 
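         * (Illustration only, assuming the default 64 KiB clusters and
         * 16-bit refcount entries: one refcount block then covers 32768
         * clusters, so the "3 + l1_clusters <= s->refcount_block_size"
         * condition holds unless the L1 table alone needs more than 32765
         * clusters.)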
*/ 4476 return make_completely_empty(bs); 4477 } 4478 4479 /* This fallback code simply discards every active cluster; this is slow, 4480 * but works in all cases */ 4481 end_offset = bs->total_sectors * BDRV_SECTOR_SIZE; 4482 for (offset = 0; offset < end_offset; offset += step) { 4483 /* As this function is generally used after committing an external 4484 * snapshot, QCOW2_DISCARD_SNAPSHOT seems appropriate. Also, the 4485 * default action for this kind of discard is to pass the discard, 4486 * which will ideally result in an actually smaller image file, as 4487 * is probably desired. */ 4488 ret = qcow2_cluster_discard(bs, offset, MIN(step, end_offset - offset), 4489 QCOW2_DISCARD_SNAPSHOT, true); 4490 if (ret < 0) { 4491 break; 4492 } 4493 } 4494 4495 return ret; 4496 } 4497 4498 static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs) 4499 { 4500 BDRVQcow2State *s = bs->opaque; 4501 int ret; 4502 4503 qemu_co_mutex_lock(&s->lock); 4504 ret = qcow2_write_caches(bs); 4505 qemu_co_mutex_unlock(&s->lock); 4506 4507 return ret; 4508 } 4509 4510 static ssize_t qcow2_measure_crypto_hdr_init_func(QCryptoBlock *block, 4511 size_t headerlen, void *opaque, Error **errp) 4512 { 4513 size_t *headerlenp = opaque; 4514 4515 /* Stash away the payload size */ 4516 *headerlenp = headerlen; 4517 return 0; 4518 } 4519 4520 static ssize_t qcow2_measure_crypto_hdr_write_func(QCryptoBlock *block, 4521 size_t offset, const uint8_t *buf, size_t buflen, 4522 void *opaque, Error **errp) 4523 { 4524 /* Discard the bytes, we're not actually writing to an image */ 4525 return buflen; 4526 } 4527 4528 /* Determine the number of bytes for the LUKS payload */ 4529 static bool qcow2_measure_luks_headerlen(QemuOpts *opts, size_t *len, 4530 Error **errp) 4531 { 4532 QDict *opts_qdict; 4533 QDict *cryptoopts_qdict; 4534 QCryptoBlockCreateOptions *cryptoopts; 4535 QCryptoBlock *crypto; 4536 4537 /* Extract "encrypt." 
options into a qdict */ 4538 opts_qdict = qemu_opts_to_qdict(opts, NULL); 4539 qdict_extract_subqdict(opts_qdict, &cryptoopts_qdict, "encrypt."); 4540 qobject_unref(opts_qdict); 4541 4542 /* Build QCryptoBlockCreateOptions object from qdict */ 4543 qdict_put_str(cryptoopts_qdict, "format", "luks"); 4544 cryptoopts = block_crypto_create_opts_init(cryptoopts_qdict, errp); 4545 qobject_unref(cryptoopts_qdict); 4546 if (!cryptoopts) { 4547 return false; 4548 } 4549 4550 /* Fake LUKS creation in order to determine the payload size */ 4551 crypto = qcrypto_block_create(cryptoopts, "encrypt.", 4552 qcow2_measure_crypto_hdr_init_func, 4553 qcow2_measure_crypto_hdr_write_func, 4554 len, errp); 4555 qapi_free_QCryptoBlockCreateOptions(cryptoopts); 4556 if (!crypto) { 4557 return false; 4558 } 4559 4560 qcrypto_block_free(crypto); 4561 return true; 4562 } 4563 4564 static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs, 4565 Error **errp) 4566 { 4567 Error *local_err = NULL; 4568 BlockMeasureInfo *info; 4569 uint64_t required = 0; /* bytes that contribute to required size */ 4570 uint64_t virtual_size; /* disk size as seen by guest */ 4571 uint64_t refcount_bits; 4572 uint64_t l2_tables; 4573 uint64_t luks_payload_size = 0; 4574 size_t cluster_size; 4575 int version; 4576 char *optstr; 4577 PreallocMode prealloc; 4578 bool has_backing_file; 4579 bool has_luks; 4580 4581 /* Parse image creation options */ 4582 cluster_size = qcow2_opt_get_cluster_size_del(opts, &local_err); 4583 if (local_err) { 4584 goto err; 4585 } 4586 4587 version = qcow2_opt_get_version_del(opts, &local_err); 4588 if (local_err) { 4589 goto err; 4590 } 4591 4592 refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err); 4593 if (local_err) { 4594 goto err; 4595 } 4596 4597 optstr = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); 4598 prealloc = qapi_enum_parse(&PreallocMode_lookup, optstr, 4599 PREALLOC_MODE_OFF, &local_err); 4600 g_free(optstr); 4601 if (local_err) { 4602 goto err; 4603 } 4604 4605 optstr = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); 4606 has_backing_file = !!optstr; 4607 g_free(optstr); 4608 4609 optstr = qemu_opt_get_del(opts, BLOCK_OPT_ENCRYPT_FORMAT); 4610 has_luks = optstr && strcmp(optstr, "luks") == 0; 4611 g_free(optstr); 4612 4613 if (has_luks) { 4614 size_t headerlen; 4615 4616 if (!qcow2_measure_luks_headerlen(opts, &headerlen, &local_err)) { 4617 goto err; 4618 } 4619 4620 luks_payload_size = ROUND_UP(headerlen, cluster_size); 4621 } 4622 4623 virtual_size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0); 4624 virtual_size = ROUND_UP(virtual_size, cluster_size); 4625 4626 /* Check that virtual disk size is valid */ 4627 l2_tables = DIV_ROUND_UP(virtual_size / cluster_size, 4628 cluster_size / sizeof(uint64_t)); 4629 if (l2_tables * sizeof(uint64_t) > QCOW_MAX_L1_SIZE) { 4630 error_setg(&local_err, "The image size is too large " 4631 "(try using a larger cluster size)"); 4632 goto err; 4633 } 4634 4635 /* Account for input image */ 4636 if (in_bs) { 4637 int64_t ssize = bdrv_getlength(in_bs); 4638 if (ssize < 0) { 4639 error_setg_errno(&local_err, -ssize, 4640 "Unable to get image virtual size"); 4641 goto err; 4642 } 4643 4644 virtual_size = ROUND_UP(ssize, cluster_size); 4645 4646 if (has_backing_file) { 4647 /* We don't know how much of the backing chain is shared by the input 4648 * image and the new image file. In the worst case the new image's 4649 * backing file has nothing in common with the input image.
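             * (For example, the input image may be fully self-contained while
             * the new image is created on top of an unrelated backing file.)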
Be 4650 * conservative and assume all clusters need to be written. 4651 */ 4652 required = virtual_size; 4653 } else { 4654 int64_t offset; 4655 int64_t pnum = 0; 4656 4657 for (offset = 0; offset < ssize; offset += pnum) { 4658 int ret; 4659 4660 ret = bdrv_block_status_above(in_bs, NULL, offset, 4661 ssize - offset, &pnum, NULL, 4662 NULL); 4663 if (ret < 0) { 4664 error_setg_errno(&local_err, -ret, 4665 "Unable to get block status"); 4666 goto err; 4667 } 4668 4669 if (ret & BDRV_BLOCK_ZERO) { 4670 /* Skip zero regions (safe with no backing file) */ 4671 } else if ((ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) == 4672 (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) { 4673 /* Extend pnum to end of cluster for next iteration */ 4674 pnum = ROUND_UP(offset + pnum, cluster_size) - offset; 4675 4676 /* Count clusters we've seen */ 4677 required += offset % cluster_size + pnum; 4678 } 4679 } 4680 } 4681 } 4682 4683 /* Take into account preallocation. Nothing special is needed for 4684 * PREALLOC_MODE_METADATA since metadata is always counted. 4685 */ 4686 if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) { 4687 required = virtual_size; 4688 } 4689 4690 info = g_new(BlockMeasureInfo, 1); 4691 info->fully_allocated = 4692 qcow2_calc_prealloc_size(virtual_size, cluster_size, 4693 ctz32(refcount_bits)) + luks_payload_size; 4694 4695 /* Remove data clusters that are not required. This overestimates the 4696 * required size because metadata needed for the fully allocated file is 4697 * still counted. 4698 */ 4699 info->required = info->fully_allocated - virtual_size + required; 4700 return info; 4701 4702 err: 4703 error_propagate(errp, local_err); 4704 return NULL; 4705 } 4706 4707 static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) 4708 { 4709 BDRVQcow2State *s = bs->opaque; 4710 bdi->unallocated_blocks_are_zero = true; 4711 bdi->cluster_size = s->cluster_size; 4712 bdi->vm_state_offset = qcow2_vm_state_offset(s); 4713 return 0; 4714 } 4715 4716 static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs, 4717 Error **errp) 4718 { 4719 BDRVQcow2State *s = bs->opaque; 4720 ImageInfoSpecific *spec_info; 4721 QCryptoBlockInfo *encrypt_info = NULL; 4722 Error *local_err = NULL; 4723 4724 if (s->crypto != NULL) { 4725 encrypt_info = qcrypto_block_get_info(s->crypto, &local_err); 4726 if (local_err) { 4727 error_propagate(errp, local_err); 4728 return NULL; 4729 } 4730 } 4731 4732 spec_info = g_new(ImageInfoSpecific, 1); 4733 *spec_info = (ImageInfoSpecific){ 4734 .type = IMAGE_INFO_SPECIFIC_KIND_QCOW2, 4735 .u.qcow2.data = g_new0(ImageInfoSpecificQCow2, 1), 4736 }; 4737 if (s->qcow_version == 2) { 4738 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 4739 .compat = g_strdup("0.10"), 4740 .refcount_bits = s->refcount_bits, 4741 }; 4742 } else if (s->qcow_version == 3) { 4743 Qcow2BitmapInfoList *bitmaps; 4744 bitmaps = qcow2_get_bitmap_info_list(bs, &local_err); 4745 if (local_err) { 4746 error_propagate(errp, local_err); 4747 qapi_free_ImageInfoSpecific(spec_info); 4748 return NULL; 4749 } 4750 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 4751 .compat = g_strdup("1.1"), 4752 .lazy_refcounts = s->compatible_features & 4753 QCOW2_COMPAT_LAZY_REFCOUNTS, 4754 .has_lazy_refcounts = true, 4755 .corrupt = s->incompatible_features & 4756 QCOW2_INCOMPAT_CORRUPT, 4757 .has_corrupt = true, 4758 .refcount_bits = s->refcount_bits, 4759 .has_bitmaps = !!bitmaps, 4760 .bitmaps = bitmaps, 4761 .has_data_file = !!s->image_data_file, 4762 .data_file = 
g_strdup(s->image_data_file), 4763 .has_data_file_raw = has_data_file(bs), 4764 .data_file_raw = data_file_is_raw(bs), 4765 }; 4766 } else { 4767 /* if this assertion fails, this probably means a new version was 4768 * added without having it covered here */ 4769 assert(false); 4770 } 4771 4772 if (encrypt_info) { 4773 ImageInfoSpecificQCow2Encryption *qencrypt = 4774 g_new(ImageInfoSpecificQCow2Encryption, 1); 4775 switch (encrypt_info->format) { 4776 case Q_CRYPTO_BLOCK_FORMAT_QCOW: 4777 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_AES; 4778 break; 4779 case Q_CRYPTO_BLOCK_FORMAT_LUKS: 4780 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_LUKS; 4781 qencrypt->u.luks = encrypt_info->u.luks; 4782 break; 4783 default: 4784 abort(); 4785 } 4786 /* Since we did shallow copy above, erase any pointers 4787 * in the original info */ 4788 memset(&encrypt_info->u, 0, sizeof(encrypt_info->u)); 4789 qapi_free_QCryptoBlockInfo(encrypt_info); 4790 4791 spec_info->u.qcow2.data->has_encrypt = true; 4792 spec_info->u.qcow2.data->encrypt = qencrypt; 4793 } 4794 4795 return spec_info; 4796 } 4797 4798 static int qcow2_has_zero_init(BlockDriverState *bs) 4799 { 4800 BDRVQcow2State *s = bs->opaque; 4801 bool preallocated; 4802 4803 if (qemu_in_coroutine()) { 4804 qemu_co_mutex_lock(&s->lock); 4805 } 4806 /* 4807 * Check preallocation status: Preallocated images have all L2 4808 * tables allocated, nonpreallocated images have none. It is 4809 * therefore enough to check the first one. 4810 */ 4811 preallocated = s->l1_size > 0 && s->l1_table[0] != 0; 4812 if (qemu_in_coroutine()) { 4813 qemu_co_mutex_unlock(&s->lock); 4814 } 4815 4816 if (!preallocated) { 4817 return 1; 4818 } else if (bs->encrypted) { 4819 return 0; 4820 } else { 4821 return bdrv_has_zero_init(s->data_file->bs); 4822 } 4823 } 4824 4825 static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, 4826 int64_t pos) 4827 { 4828 BDRVQcow2State *s = bs->opaque; 4829 4830 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE); 4831 return bs->drv->bdrv_co_pwritev_part(bs, qcow2_vm_state_offset(s) + pos, 4832 qiov->size, qiov, 0, 0); 4833 } 4834 4835 static int qcow2_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, 4836 int64_t pos) 4837 { 4838 BDRVQcow2State *s = bs->opaque; 4839 4840 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD); 4841 return bs->drv->bdrv_co_preadv_part(bs, qcow2_vm_state_offset(s) + pos, 4842 qiov->size, qiov, 0, 0); 4843 } 4844 4845 /* 4846 * Downgrades an image's version. To achieve this, any incompatible features 4847 * have to be removed. 
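 * For example, a downgrade from compat=1.1 to compat=0.10 must first rewrite
 * zero clusters as ordinary data clusters (see the qcow2_expand_zero_clusters()
 * call below), because the v2 layout has no zero-cluster flag.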
4848 */ 4849 static int qcow2_downgrade(BlockDriverState *bs, int target_version, 4850 BlockDriverAmendStatusCB *status_cb, void *cb_opaque, 4851 Error **errp) 4852 { 4853 BDRVQcow2State *s = bs->opaque; 4854 int current_version = s->qcow_version; 4855 int ret; 4856 4857 /* This is qcow2_downgrade(), not qcow2_upgrade() */ 4858 assert(target_version < current_version); 4859 4860 /* There are no other versions (now) that you can downgrade to */ 4861 assert(target_version == 2); 4862 4863 if (s->refcount_order != 4) { 4864 error_setg(errp, "compat=0.10 requires refcount_bits=16"); 4865 return -ENOTSUP; 4866 } 4867 4868 if (has_data_file(bs)) { 4869 error_setg(errp, "Cannot downgrade an image with a data file"); 4870 return -ENOTSUP; 4871 } 4872 4873 /* clear incompatible features */ 4874 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 4875 ret = qcow2_mark_clean(bs); 4876 if (ret < 0) { 4877 error_setg_errno(errp, -ret, "Failed to make the image clean"); 4878 return ret; 4879 } 4880 } 4881 4882 /* with QCOW2_INCOMPAT_CORRUPT, it is pretty much impossible to get here in 4883 * the first place; if that happens nonetheless, returning -ENOTSUP is the 4884 * best thing to do anyway */ 4885 4886 if (s->incompatible_features) { 4887 error_setg(errp, "Cannot downgrade an image with incompatible features " 4888 "%#" PRIx64 " set", s->incompatible_features); 4889 return -ENOTSUP; 4890 } 4891 4892 /* since we can ignore compatible features, we can set them to 0 as well */ 4893 s->compatible_features = 0; 4894 /* if lazy refcounts have been used, they have already been fixed through 4895 * clearing the dirty flag */ 4896 4897 /* clearing autoclear features is trivial */ 4898 s->autoclear_features = 0; 4899 4900 ret = qcow2_expand_zero_clusters(bs, status_cb, cb_opaque); 4901 if (ret < 0) { 4902 error_setg_errno(errp, -ret, "Failed to turn zero into data clusters"); 4903 return ret; 4904 } 4905 4906 s->qcow_version = target_version; 4907 ret = qcow2_update_header(bs); 4908 if (ret < 0) { 4909 s->qcow_version = current_version; 4910 error_setg_errno(errp, -ret, "Failed to update the image header"); 4911 return ret; 4912 } 4913 return 0; 4914 } 4915 4916 typedef enum Qcow2AmendOperation { 4917 /* This is the value Qcow2AmendHelperCBInfo::last_operation will be 4918 * statically initialized to so that the helper CB can discern the first 4919 * invocation from an operation change */ 4920 QCOW2_NO_OPERATION = 0, 4921 4922 QCOW2_CHANGING_REFCOUNT_ORDER, 4923 QCOW2_DOWNGRADING, 4924 } Qcow2AmendOperation; 4925 4926 typedef struct Qcow2AmendHelperCBInfo { 4927 /* The code coordinating the amend operations should only modify 4928 * these four fields; the rest will be managed by the CB */ 4929 BlockDriverAmendStatusCB *original_status_cb; 4930 void *original_cb_opaque; 4931 4932 Qcow2AmendOperation current_operation; 4933 4934 /* Total number of operations to perform (only set once) */ 4935 int total_operations; 4936 4937 /* The following fields are managed by the CB */ 4938 4939 /* Number of operations completed */ 4940 int operations_completed; 4941 4942 /* Cumulative offset of all completed operations */ 4943 int64_t offset_completed; 4944 4945 Qcow2AmendOperation last_operation; 4946 int64_t last_work_size; 4947 } Qcow2AmendHelperCBInfo; 4948 4949 static void qcow2_amend_helper_cb(BlockDriverState *bs, 4950 int64_t operation_offset, 4951 int64_t operation_work_size, void *opaque) 4952 { 4953 Qcow2AmendHelperCBInfo *info = opaque; 4954 int64_t current_work_size; 4955 int64_t projected_work_size; 4956 
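    /*
     * Rough worked example of the reporting below (figures invented for this
     * comment): with total_operations == 2, an update from the first
     * operation at offset 40 of work size 100 is reported as 40 out of 200
     * (100 for this operation plus a projected 100 for the remaining one).
     * A later update from the second operation at offset 30 of work size 60
     * is reported as 130 out of 160, since 100 units are already completed
     * and nothing further needs to be projected.
     */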
4957 if (info->current_operation != info->last_operation) { 4958 if (info->last_operation != QCOW2_NO_OPERATION) { 4959 info->offset_completed += info->last_work_size; 4960 info->operations_completed++; 4961 } 4962 4963 info->last_operation = info->current_operation; 4964 } 4965 4966 assert(info->total_operations > 0); 4967 assert(info->operations_completed < info->total_operations); 4968 4969 info->last_work_size = operation_work_size; 4970 4971 current_work_size = info->offset_completed + operation_work_size; 4972 4973 /* current_work_size is the total work size for (operations_completed + 1) 4974 * operations (which includes this one), so multiply it by the number of 4975 * operations not covered and divide it by the number of operations 4976 * covered to get a projection for the operations not covered */ 4977 projected_work_size = current_work_size * (info->total_operations - 4978 info->operations_completed - 1) 4979 / (info->operations_completed + 1); 4980 4981 info->original_status_cb(bs, info->offset_completed + operation_offset, 4982 current_work_size + projected_work_size, 4983 info->original_cb_opaque); 4984 } 4985 4986 static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts, 4987 BlockDriverAmendStatusCB *status_cb, 4988 void *cb_opaque, 4989 Error **errp) 4990 { 4991 BDRVQcow2State *s = bs->opaque; 4992 int old_version = s->qcow_version, new_version = old_version; 4993 uint64_t new_size = 0; 4994 const char *backing_file = NULL, *backing_format = NULL, *data_file = NULL; 4995 bool lazy_refcounts = s->use_lazy_refcounts; 4996 bool data_file_raw = data_file_is_raw(bs); 4997 const char *compat = NULL; 4998 uint64_t cluster_size = s->cluster_size; 4999 bool encrypt; 5000 int encformat; 5001 int refcount_bits = s->refcount_bits; 5002 int ret; 5003 QemuOptDesc *desc = opts->list->desc; 5004 Qcow2AmendHelperCBInfo helper_cb_info; 5005 5006 while (desc && desc->name) { 5007 if (!qemu_opt_find(opts, desc->name)) { 5008 /* only change explicitly defined options */ 5009 desc++; 5010 continue; 5011 } 5012 5013 if (!strcmp(desc->name, BLOCK_OPT_COMPAT_LEVEL)) { 5014 compat = qemu_opt_get(opts, BLOCK_OPT_COMPAT_LEVEL); 5015 if (!compat) { 5016 /* preserve default */ 5017 } else if (!strcmp(compat, "0.10") || !strcmp(compat, "v2")) { 5018 new_version = 2; 5019 } else if (!strcmp(compat, "1.1") || !strcmp(compat, "v3")) { 5020 new_version = 3; 5021 } else { 5022 error_setg(errp, "Unknown compatibility level %s", compat); 5023 return -EINVAL; 5024 } 5025 } else if (!strcmp(desc->name, BLOCK_OPT_PREALLOC)) { 5026 error_setg(errp, "Cannot change preallocation mode"); 5027 return -ENOTSUP; 5028 } else if (!strcmp(desc->name, BLOCK_OPT_SIZE)) { 5029 new_size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0); 5030 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FILE)) { 5031 backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE); 5032 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FMT)) { 5033 backing_format = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT); 5034 } else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT)) { 5035 encrypt = qemu_opt_get_bool(opts, BLOCK_OPT_ENCRYPT, 5036 !!s->crypto); 5037 5038 if (encrypt != !!s->crypto) { 5039 error_setg(errp, 5040 "Changing the encryption flag is not supported"); 5041 return -ENOTSUP; 5042 } 5043 } else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT_FORMAT)) { 5044 encformat = qcow2_crypt_method_from_format( 5045 qemu_opt_get(opts, BLOCK_OPT_ENCRYPT_FORMAT)); 5046 5047 if (encformat != s->crypt_method_header) { 5048 error_setg(errp, 5049 "Changing the 
encryption format is not supported"); 5050 return -ENOTSUP; 5051 } 5052 } else if (g_str_has_prefix(desc->name, "encrypt.")) { 5053 error_setg(errp, 5054 "Changing the encryption parameters is not supported"); 5055 return -ENOTSUP; 5056 } else if (!strcmp(desc->name, BLOCK_OPT_CLUSTER_SIZE)) { 5057 cluster_size = qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE, 5058 cluster_size); 5059 if (cluster_size != s->cluster_size) { 5060 error_setg(errp, "Changing the cluster size is not supported"); 5061 return -ENOTSUP; 5062 } 5063 } else if (!strcmp(desc->name, BLOCK_OPT_LAZY_REFCOUNTS)) { 5064 lazy_refcounts = qemu_opt_get_bool(opts, BLOCK_OPT_LAZY_REFCOUNTS, 5065 lazy_refcounts); 5066 } else if (!strcmp(desc->name, BLOCK_OPT_REFCOUNT_BITS)) { 5067 refcount_bits = qemu_opt_get_number(opts, BLOCK_OPT_REFCOUNT_BITS, 5068 refcount_bits); 5069 5070 if (refcount_bits <= 0 || refcount_bits > 64 || 5071 !is_power_of_2(refcount_bits)) 5072 { 5073 error_setg(errp, "Refcount width must be a power of two and " 5074 "may not exceed 64 bits"); 5075 return -EINVAL; 5076 } 5077 } else if (!strcmp(desc->name, BLOCK_OPT_DATA_FILE)) { 5078 data_file = qemu_opt_get(opts, BLOCK_OPT_DATA_FILE); 5079 if (data_file && !has_data_file(bs)) { 5080 error_setg(errp, "data-file can only be set for images that " 5081 "use an external data file"); 5082 return -EINVAL; 5083 } 5084 } else if (!strcmp(desc->name, BLOCK_OPT_DATA_FILE_RAW)) { 5085 data_file_raw = qemu_opt_get_bool(opts, BLOCK_OPT_DATA_FILE_RAW, 5086 data_file_raw); 5087 if (data_file_raw && !data_file_is_raw(bs)) { 5088 error_setg(errp, "data-file-raw cannot be set on existing " 5089 "images"); 5090 return -EINVAL; 5091 } 5092 } else { 5093 /* if this point is reached, this probably means a new option was 5094 * added without having it covered here */ 5095 abort(); 5096 } 5097 5098 desc++; 5099 } 5100 5101 helper_cb_info = (Qcow2AmendHelperCBInfo){ 5102 .original_status_cb = status_cb, 5103 .original_cb_opaque = cb_opaque, 5104 .total_operations = (new_version < old_version) 5105 + (s->refcount_bits != refcount_bits) 5106 }; 5107 5108 /* Upgrade first (some features may require compat=1.1) */ 5109 if (new_version > old_version) { 5110 s->qcow_version = new_version; 5111 ret = qcow2_update_header(bs); 5112 if (ret < 0) { 5113 s->qcow_version = old_version; 5114 error_setg_errno(errp, -ret, "Failed to update the image header"); 5115 return ret; 5116 } 5117 } 5118 5119 if (s->refcount_bits != refcount_bits) { 5120 int refcount_order = ctz32(refcount_bits); 5121 5122 if (new_version < 3 && refcount_bits != 16) { 5123 error_setg(errp, "Refcount widths other than 16 bits require " 5124 "compatibility level 1.1 or above (use compat=1.1 or " 5125 "greater)"); 5126 return -EINVAL; 5127 } 5128 5129 helper_cb_info.current_operation = QCOW2_CHANGING_REFCOUNT_ORDER; 5130 ret = qcow2_change_refcount_order(bs, refcount_order, 5131 &qcow2_amend_helper_cb, 5132 &helper_cb_info, errp); 5133 if (ret < 0) { 5134 return ret; 5135 } 5136 } 5137 5138 /* data-file-raw blocks backing files, so clear it first if requested */ 5139 if (data_file_raw) { 5140 s->autoclear_features |= QCOW2_AUTOCLEAR_DATA_FILE_RAW; 5141 } else { 5142 s->autoclear_features &= ~QCOW2_AUTOCLEAR_DATA_FILE_RAW; 5143 } 5144 5145 if (data_file) { 5146 g_free(s->image_data_file); 5147 s->image_data_file = *data_file ? 
g_strdup(data_file) : NULL; 5148 } 5149 5150 ret = qcow2_update_header(bs); 5151 if (ret < 0) { 5152 error_setg_errno(errp, -ret, "Failed to update the image header"); 5153 return ret; 5154 } 5155 5156 if (backing_file || backing_format) { 5157 ret = qcow2_change_backing_file(bs, 5158 backing_file ?: s->image_backing_file, 5159 backing_format ?: s->image_backing_format); 5160 if (ret < 0) { 5161 error_setg_errno(errp, -ret, "Failed to change the backing file"); 5162 return ret; 5163 } 5164 } 5165 5166 if (s->use_lazy_refcounts != lazy_refcounts) { 5167 if (lazy_refcounts) { 5168 if (new_version < 3) { 5169 error_setg(errp, "Lazy refcounts only supported with " 5170 "compatibility level 1.1 and above (use compat=1.1 " 5171 "or greater)"); 5172 return -EINVAL; 5173 } 5174 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS; 5175 ret = qcow2_update_header(bs); 5176 if (ret < 0) { 5177 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS; 5178 error_setg_errno(errp, -ret, "Failed to update the image header"); 5179 return ret; 5180 } 5181 s->use_lazy_refcounts = true; 5182 } else { 5183 /* make image clean first */ 5184 ret = qcow2_mark_clean(bs); 5185 if (ret < 0) { 5186 error_setg_errno(errp, -ret, "Failed to make the image clean"); 5187 return ret; 5188 } 5189 /* now disallow lazy refcounts */ 5190 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS; 5191 ret = qcow2_update_header(bs); 5192 if (ret < 0) { 5193 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS; 5194 error_setg_errno(errp, -ret, "Failed to update the image header"); 5195 return ret; 5196 } 5197 s->use_lazy_refcounts = false; 5198 } 5199 } 5200 5201 if (new_size) { 5202 BlockBackend *blk = blk_new(bdrv_get_aio_context(bs), 5203 BLK_PERM_RESIZE, BLK_PERM_ALL); 5204 ret = blk_insert_bs(blk, bs, errp); 5205 if (ret < 0) { 5206 blk_unref(blk); 5207 return ret; 5208 } 5209 5210 ret = blk_truncate(blk, new_size, PREALLOC_MODE_OFF, errp); 5211 blk_unref(blk); 5212 if (ret < 0) { 5213 return ret; 5214 } 5215 } 5216 5217 /* Downgrade last (so unsupported features can be removed before) */ 5218 if (new_version < old_version) { 5219 helper_cb_info.current_operation = QCOW2_DOWNGRADING; 5220 ret = qcow2_downgrade(bs, new_version, &qcow2_amend_helper_cb, 5221 &helper_cb_info, errp); 5222 if (ret < 0) { 5223 return ret; 5224 } 5225 } 5226 5227 return 0; 5228 } 5229 5230 /* 5231 * If offset or size are negative, respectively, they will not be included in 5232 * the BLOCK_IMAGE_CORRUPTED event emitted. 5233 * fatal will be ignored for read-only BDS; corruptions found there will always 5234 * be considered non-fatal. 5235 */ 5236 void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset, 5237 int64_t size, const char *message_format, ...) 
5238 { 5239 BDRVQcow2State *s = bs->opaque; 5240 const char *node_name; 5241 char *message; 5242 va_list ap; 5243 5244 fatal = fatal && bdrv_is_writable(bs); 5245 5246 if (s->signaled_corruption && 5247 (!fatal || (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT))) 5248 { 5249 return; 5250 } 5251 5252 va_start(ap, message_format); 5253 message = g_strdup_vprintf(message_format, ap); 5254 va_end(ap); 5255 5256 if (fatal) { 5257 fprintf(stderr, "qcow2: Marking image as corrupt: %s; further " 5258 "corruption events will be suppressed\n", message); 5259 } else { 5260 fprintf(stderr, "qcow2: Image is corrupt: %s; further non-fatal " 5261 "corruption events will be suppressed\n", message); 5262 } 5263 5264 node_name = bdrv_get_node_name(bs); 5265 qapi_event_send_block_image_corrupted(bdrv_get_device_name(bs), 5266 *node_name != '\0', node_name, 5267 message, offset >= 0, offset, 5268 size >= 0, size, 5269 fatal); 5270 g_free(message); 5271 5272 if (fatal) { 5273 qcow2_mark_corrupt(bs); 5274 bs->drv = NULL; /* make BDS unusable */ 5275 } 5276 5277 s->signaled_corruption = true; 5278 } 5279 5280 static QemuOptsList qcow2_create_opts = { 5281 .name = "qcow2-create-opts", 5282 .head = QTAILQ_HEAD_INITIALIZER(qcow2_create_opts.head), 5283 .desc = { 5284 { 5285 .name = BLOCK_OPT_SIZE, 5286 .type = QEMU_OPT_SIZE, 5287 .help = "Virtual disk size" 5288 }, 5289 { 5290 .name = BLOCK_OPT_COMPAT_LEVEL, 5291 .type = QEMU_OPT_STRING, 5292 .help = "Compatibility level (v2 [0.10] or v3 [1.1])" 5293 }, 5294 { 5295 .name = BLOCK_OPT_BACKING_FILE, 5296 .type = QEMU_OPT_STRING, 5297 .help = "File name of a base image" 5298 }, 5299 { 5300 .name = BLOCK_OPT_BACKING_FMT, 5301 .type = QEMU_OPT_STRING, 5302 .help = "Image format of the base image" 5303 }, 5304 { 5305 .name = BLOCK_OPT_DATA_FILE, 5306 .type = QEMU_OPT_STRING, 5307 .help = "File name of an external data file" 5308 }, 5309 { 5310 .name = BLOCK_OPT_DATA_FILE_RAW, 5311 .type = QEMU_OPT_BOOL, 5312 .help = "The external data file must stay valid as a raw image" 5313 }, 5314 { 5315 .name = BLOCK_OPT_ENCRYPT, 5316 .type = QEMU_OPT_BOOL, 5317 .help = "Encrypt the image with format 'aes'. 
(Deprecated " 5318 "in favor of " BLOCK_OPT_ENCRYPT_FORMAT "=aes)", 5319 }, 5320 { 5321 .name = BLOCK_OPT_ENCRYPT_FORMAT, 5322 .type = QEMU_OPT_STRING, 5323 .help = "Encrypt the image, format choices: 'aes', 'luks'", 5324 }, 5325 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.", 5326 "ID of secret providing qcow AES key or LUKS passphrase"), 5327 BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_ALG("encrypt."), 5328 BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_MODE("encrypt."), 5329 BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_ALG("encrypt."), 5330 BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_HASH_ALG("encrypt."), 5331 BLOCK_CRYPTO_OPT_DEF_LUKS_HASH_ALG("encrypt."), 5332 BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."), 5333 { 5334 .name = BLOCK_OPT_CLUSTER_SIZE, 5335 .type = QEMU_OPT_SIZE, 5336 .help = "qcow2 cluster size", 5337 .def_value_str = stringify(DEFAULT_CLUSTER_SIZE) 5338 }, 5339 { 5340 .name = BLOCK_OPT_PREALLOC, 5341 .type = QEMU_OPT_STRING, 5342 .help = "Preallocation mode (allowed values: off, metadata, " 5343 "falloc, full)" 5344 }, 5345 { 5346 .name = BLOCK_OPT_LAZY_REFCOUNTS, 5347 .type = QEMU_OPT_BOOL, 5348 .help = "Postpone refcount updates", 5349 .def_value_str = "off" 5350 }, 5351 { 5352 .name = BLOCK_OPT_REFCOUNT_BITS, 5353 .type = QEMU_OPT_NUMBER, 5354 .help = "Width of a reference count entry in bits", 5355 .def_value_str = "16" 5356 }, 5357 { /* end of list */ } 5358 } 5359 }; 5360 5361 static const char *const qcow2_strong_runtime_opts[] = { 5362 "encrypt." BLOCK_CRYPTO_OPT_QCOW_KEY_SECRET, 5363 5364 NULL 5365 }; 5366 5367 BlockDriver bdrv_qcow2 = { 5368 .format_name = "qcow2", 5369 .instance_size = sizeof(BDRVQcow2State), 5370 .bdrv_probe = qcow2_probe, 5371 .bdrv_open = qcow2_open, 5372 .bdrv_close = qcow2_close, 5373 .bdrv_reopen_prepare = qcow2_reopen_prepare, 5374 .bdrv_reopen_commit = qcow2_reopen_commit, 5375 .bdrv_reopen_abort = qcow2_reopen_abort, 5376 .bdrv_join_options = qcow2_join_options, 5377 .bdrv_child_perm = bdrv_format_default_perms, 5378 .bdrv_co_create_opts = qcow2_co_create_opts, 5379 .bdrv_co_create = qcow2_co_create, 5380 .bdrv_has_zero_init = qcow2_has_zero_init, 5381 .bdrv_has_zero_init_truncate = bdrv_has_zero_init_1, 5382 .bdrv_co_block_status = qcow2_co_block_status, 5383 5384 .bdrv_co_preadv_part = qcow2_co_preadv_part, 5385 .bdrv_co_pwritev_part = qcow2_co_pwritev_part, 5386 .bdrv_co_flush_to_os = qcow2_co_flush_to_os, 5387 5388 .bdrv_co_pwrite_zeroes = qcow2_co_pwrite_zeroes, 5389 .bdrv_co_pdiscard = qcow2_co_pdiscard, 5390 .bdrv_co_copy_range_from = qcow2_co_copy_range_from, 5391 .bdrv_co_copy_range_to = qcow2_co_copy_range_to, 5392 .bdrv_co_truncate = qcow2_co_truncate, 5393 .bdrv_co_pwritev_compressed_part = qcow2_co_pwritev_compressed_part, 5394 .bdrv_make_empty = qcow2_make_empty, 5395 5396 .bdrv_snapshot_create = qcow2_snapshot_create, 5397 .bdrv_snapshot_goto = qcow2_snapshot_goto, 5398 .bdrv_snapshot_delete = qcow2_snapshot_delete, 5399 .bdrv_snapshot_list = qcow2_snapshot_list, 5400 .bdrv_snapshot_load_tmp = qcow2_snapshot_load_tmp, 5401 .bdrv_measure = qcow2_measure, 5402 .bdrv_get_info = qcow2_get_info, 5403 .bdrv_get_specific_info = qcow2_get_specific_info, 5404 5405 .bdrv_save_vmstate = qcow2_save_vmstate, 5406 .bdrv_load_vmstate = qcow2_load_vmstate, 5407 5408 .supports_backing = true, 5409 .bdrv_change_backing_file = qcow2_change_backing_file, 5410 5411 .bdrv_refresh_limits = qcow2_refresh_limits, 5412 .bdrv_co_invalidate_cache = qcow2_co_invalidate_cache, 5413 .bdrv_inactivate = qcow2_inactivate, 5414 5415 .create_opts = &qcow2_create_opts, 5416 .strong_runtime_opts = 
qcow2_strong_runtime_opts, 5417 .mutable_opts = mutable_opts, 5418 .bdrv_co_check = qcow2_co_check, 5419 .bdrv_amend_options = qcow2_amend_options, 5420 5421 .bdrv_detach_aio_context = qcow2_detach_aio_context, 5422 .bdrv_attach_aio_context = qcow2_attach_aio_context, 5423 5424 .bdrv_co_can_store_new_dirty_bitmap = qcow2_co_can_store_new_dirty_bitmap, 5425 .bdrv_co_remove_persistent_dirty_bitmap = 5426 qcow2_co_remove_persistent_dirty_bitmap, 5427 }; 5428 5429 static void bdrv_qcow2_init(void) 5430 { 5431 bdrv_register(&bdrv_qcow2); 5432 } 5433 5434 block_init(bdrv_qcow2_init); 5435
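
/*
 * Usage sketch (illustrative, not part of the original driver): the options
 * declared in qcow2_create_opts above are the ones accepted by qemu-img for
 * this format, e.g.:
 *
 *   qemu-img create -f qcow2 \
 *       -o compat=1.1,cluster_size=65536,lazy_refcounts=on,refcount_bits=16 \
 *       test.qcow2 10G
 *
 * The exact set of accepted option names depends on the QEMU build in use.
 */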