/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"

#include "block/qdict.h"
#include "sysemu/block-backend.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qcow2.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "trace.h"
#include "qemu/option_int.h"
#include "qemu/cutils.h"
#include "qemu/bswap.h"
#include "qemu/memalign.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-block-core.h"
#include "crypto.h"
#include "block/aio_task.h"
#include "block/dirty-bitmap.h"

/*
  Differences with QCOW:

  - Support for multiple incremental snapshots.
  - Memory management by reference counts.
  - Clusters which have a reference count of one have the bit
    QCOW_OFLAG_COPIED to optimize write performance.
  - Size of compressed clusters is stored in sectors to reduce bit usage
    in the cluster offsets.
  - Support for storing additional data (such as the VM state) in the
    snapshots.
  - If a backing store is used, the cluster size is not constrained
    (could be backported to QCOW).
  - L2 tables always have a size of one cluster.
*/


typedef struct {
    uint32_t magic;
    uint32_t len;
} QEMU_PACKED QCowExtension;

#define QCOW2_EXT_MAGIC_END 0
#define QCOW2_EXT_MAGIC_BACKING_FORMAT 0xe2792aca
#define QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857
#define QCOW2_EXT_MAGIC_CRYPTO_HEADER 0x0537be77
#define QCOW2_EXT_MAGIC_BITMAPS 0x23852875
#define QCOW2_EXT_MAGIC_DATA_FILE 0x44415441

static int coroutine_fn
qcow2_co_preadv_compressed(BlockDriverState *bs,
                           uint64_t l2_entry,
                           uint64_t offset,
                           uint64_t bytes,
                           QEMUIOVector *qiov,
                           size_t qiov_offset);

static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    const QCowHeader *cow_header = (const void *)buf;

    if (buf_size >= sizeof(QCowHeader) &&
        be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
        be32_to_cpu(cow_header->version) >= 2)
        return 100;
    else
        return 0;
}


static int qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset,
                                      uint8_t *buf, size_t buflen,
                                      void *opaque, Error **errp)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    ssize_t ret;

    if ((offset + buflen) > s->crypto_header.length) {
        error_setg(errp, "Request for data outside of extension header");
        return -1;
    }

    ret = bdrv_pread(bs->file, s->crypto_header.offset + offset, buflen, buf,
                     0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read encryption header");
        return -1;
    }
    return 0;
}


static int qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen,
                                      void *opaque, Error **errp)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    int64_t ret;
    int64_t clusterlen;

    ret = qcow2_alloc_clusters(bs, headerlen);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Cannot allocate cluster for LUKS header size %zu",
                         headerlen);
        return -1;
    }

    s->crypto_header.length = headerlen;
    s->crypto_header.offset = ret;

    /*
     * Zero fill all space in cluster so it has predictable
     * content, as we may not initialize some regions of the
     * header (eg only 1 out of 8 key slots will be initialized)
     */
    clusterlen = size_to_clusters(s, headerlen) * s->cluster_size;
    assert(qcow2_pre_write_overlap_check(bs, 0, ret, clusterlen, false) == 0);
    ret = bdrv_pwrite_zeroes(bs->file,
                             ret,
                             clusterlen, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not zero fill encryption header");
        return -1;
    }

    return 0;
}


static int qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset,
                                       const uint8_t *buf, size_t buflen,
                                       void *opaque, Error **errp)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    ssize_t ret;

    if ((offset + buflen) > s->crypto_header.length) {
        error_setg(errp, "Request for data outside of extension header");
        return -1;
    }

    ret = bdrv_pwrite(bs->file, s->crypto_header.offset + offset, buflen, buf,
                      0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not write encryption header");
        return -1;
    }
    return 0;
}

static QDict*
qcow2_extract_crypto_opts(QemuOpts *opts, const char *fmt, Error **errp)
{
    QDict *cryptoopts_qdict;
    QDict *opts_qdict;

    /* Extract "encrypt." options into a qdict */
    opts_qdict = qemu_opts_to_qdict(opts, NULL);
    qdict_extract_subqdict(opts_qdict, &cryptoopts_qdict, "encrypt.");
    qobject_unref(opts_qdict);
    qdict_put_str(cryptoopts_qdict, "format", fmt);
    return cryptoopts_qdict;
}

/*
 * Read qcow2 extensions and fill bs.
 * Start reading from start_offset; finish upon an extension with magic 0 or
 * when end_offset is reached.
 * Unknown magics are skipped (future extensions this version knows nothing
 * about).
 * Returns 0 upon success, non-0 otherwise.
 */
static int coroutine_fn GRAPH_RDLOCK
qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
                      uint64_t end_offset, void **p_feature_table,
                      int flags, bool *need_update_header, Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    QCowExtension ext;
    uint64_t offset;
    int ret;
    Qcow2BitmapHeaderExt bitmaps_ext;

    if (need_update_header != NULL) {
        *need_update_header = false;
    }

#ifdef DEBUG_EXT
    printf("qcow2_read_extensions: start=%" PRIu64 " end=%" PRIu64 "\n",
           start_offset, end_offset);
#endif
    offset = start_offset;
    while (offset < end_offset) {

#ifdef DEBUG_EXT
        /* Sanity check */
        if (offset > s->cluster_size)
            printf("qcow2_read_extension: suspicious offset %" PRIu64 "\n",
                   offset);

        printf("attempting to read extended header in offset %" PRIu64 "\n",
               offset);
#endif

        ret = bdrv_co_pread(bs->file, offset, sizeof(ext), &ext, 0);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: "
                             "pread fail from offset %" PRIu64, offset);
            return 1;
        }
        ext.magic = be32_to_cpu(ext.magic);
        ext.len = be32_to_cpu(ext.len);
        offset += sizeof(ext);
#ifdef DEBUG_EXT
        printf("ext.magic = 0x%x\n", ext.magic);
#endif
        if (offset > end_offset || ext.len > end_offset - offset) {
            error_setg(errp, "Header extension too large");
            return -EINVAL;
        }

        switch (ext.magic) {
        case QCOW2_EXT_MAGIC_END:
            return 0;

        case QCOW2_EXT_MAGIC_BACKING_FORMAT:
            if (ext.len >= sizeof(bs->backing_format)) {
                error_setg(errp, "ERROR: ext_backing_format: len=%" PRIu32
                           " too large (>=%zu)", ext.len,
                           sizeof(bs->backing_format));
                return 2;
            }
            ret = bdrv_co_pread(bs->file, offset, ext.len, bs->backing_format, 0);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "ERROR: ext_backing_format: "
                                 "Could not read format name");
                return 3;
            }
            bs->backing_format[ext.len] = '\0';
            s->image_backing_format = g_strdup(bs->backing_format);
#ifdef DEBUG_EXT
            printf("Qcow2: Got format extension %s\n", bs->backing_format);
#endif
            break;

        case QCOW2_EXT_MAGIC_FEATURE_TABLE:
            if (p_feature_table != NULL) {
                void *feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
                ret = bdrv_co_pread(bs->file, offset, ext.len, feature_table, 0);
                if (ret < 0) {
                    error_setg_errno(errp, -ret, "ERROR: ext_feature_table: "
                                     "Could not read table");
                    g_free(feature_table);
                    return ret;
                }

                *p_feature_table = feature_table;
            }
            break;

        case QCOW2_EXT_MAGIC_CRYPTO_HEADER: {
            unsigned int cflags = 0;
            if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
                error_setg(errp, "CRYPTO header extension only "
                           "expected with LUKS encryption method");
                return -EINVAL;
            }
            if (ext.len != sizeof(Qcow2CryptoHeaderExtension)) {
                error_setg(errp, "CRYPTO header extension size %u, "
                           "but expected size %zu", ext.len,
                           sizeof(Qcow2CryptoHeaderExtension));
                return -EINVAL;
            }

            ret = bdrv_co_pread(bs->file, offset, ext.len, &s->crypto_header, 0);
            if (ret < 0) {
                error_setg_errno(errp, -ret,
                                 "Unable to read CRYPTO header extension");
                return ret;
            }
            s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset);
            s->crypto_header.length = be64_to_cpu(s->crypto_header.length);

            if ((s->crypto_header.offset % s->cluster_size) != 0) {
                error_setg(errp, "Encryption header offset '%" PRIu64 "' is "
                           "not a multiple of cluster size '%u'",
                           s->crypto_header.offset, s->cluster_size);
                return -EINVAL;
            }

            if (flags & BDRV_O_NO_IO) {
                cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
            }
            s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
                                           qcow2_crypto_hdr_read_func,
                                           bs, cflags, QCOW2_MAX_THREADS, errp);
            if (!s->crypto) {
                return -EINVAL;
            }
        } break;

        case QCOW2_EXT_MAGIC_BITMAPS:
            if (ext.len != sizeof(bitmaps_ext)) {
                error_setg_errno(errp, -ret, "bitmaps_ext: "
                                 "Invalid extension length");
                return -EINVAL;
            }

            if (!(s->autoclear_features & QCOW2_AUTOCLEAR_BITMAPS)) {
                if (s->qcow_version < 3) {
                    /* Let's be a bit more specific */
                    warn_report("This qcow2 v2 image contains bitmaps, but "
                                "they may have been modified by a program "
                                "without persistent bitmap support; so now "
                                "they must all be considered inconsistent");
                } else {
                    warn_report("a program lacking bitmap support "
                                "modified this file, so all bitmaps are now "
                                "considered inconsistent");
                }
                error_printf("Some clusters may be leaked, "
                             "run 'qemu-img check -r' on the image "
                             "file to fix.");
                if (need_update_header != NULL) {
                    /* Updating is needed to drop invalid bitmap extension. */
                    *need_update_header = true;
                }
                break;
            }

            ret = bdrv_co_pread(bs->file, offset, ext.len, &bitmaps_ext, 0);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "bitmaps_ext: "
                                 "Could not read ext header");
                return ret;
            }

            if (bitmaps_ext.reserved32 != 0) {
                error_setg_errno(errp, -ret, "bitmaps_ext: "
                                 "Reserved field is not zero");
                return -EINVAL;
            }

            bitmaps_ext.nb_bitmaps = be32_to_cpu(bitmaps_ext.nb_bitmaps);
            bitmaps_ext.bitmap_directory_size =
                be64_to_cpu(bitmaps_ext.bitmap_directory_size);
            bitmaps_ext.bitmap_directory_offset =
                be64_to_cpu(bitmaps_ext.bitmap_directory_offset);

            if (bitmaps_ext.nb_bitmaps > QCOW2_MAX_BITMAPS) {
                error_setg(errp,
                           "bitmaps_ext: Image has %" PRIu32 " bitmaps, "
                           "exceeding the QEMU supported maximum of %d",
                           bitmaps_ext.nb_bitmaps, QCOW2_MAX_BITMAPS);
                return -EINVAL;
            }

            if (bitmaps_ext.nb_bitmaps == 0) {
                error_setg(errp, "found bitmaps extension with zero bitmaps");
                return -EINVAL;
            }

            if (offset_into_cluster(s, bitmaps_ext.bitmap_directory_offset)) {
                error_setg(errp, "bitmaps_ext: "
                           "invalid bitmap directory offset");
                return -EINVAL;
            }

            if (bitmaps_ext.bitmap_directory_size >
                QCOW2_MAX_BITMAP_DIRECTORY_SIZE) {
                error_setg(errp, "bitmaps_ext: "
                           "bitmap directory size (%" PRIu64 ") exceeds "
                           "the maximum supported size (%d)",
                           bitmaps_ext.bitmap_directory_size,
                           QCOW2_MAX_BITMAP_DIRECTORY_SIZE);
                return -EINVAL;
            }

            s->nb_bitmaps = bitmaps_ext.nb_bitmaps;
            s->bitmap_directory_offset =
                bitmaps_ext.bitmap_directory_offset;
            s->bitmap_directory_size =
                bitmaps_ext.bitmap_directory_size;

#ifdef DEBUG_EXT
            printf("Qcow2: Got bitmaps extension: "
                   "offset=%" PRIu64 " nb_bitmaps=%" PRIu32 "\n",
                   s->bitmap_directory_offset, s->nb_bitmaps);
#endif
            break;

        case QCOW2_EXT_MAGIC_DATA_FILE:
        {
            s->image_data_file = g_malloc0(ext.len + 1);
            ret = bdrv_co_pread(bs->file, offset, ext.len, s->image_data_file, 0);
            if (ret < 0) {
                error_setg_errno(errp, -ret,
                                 "ERROR: Could not read data file name");
                return ret;
            }
#ifdef DEBUG_EXT
            printf("Qcow2: Got external data file %s\n", s->image_data_file);
#endif
            break;
        }

        default:
            /* unknown magic - save it in case we need to rewrite the header */
            /* If you add a new feature, make sure to also update the fast
             * path of qcow2_make_empty() to deal with it. */
            {
                Qcow2UnknownHeaderExtension *uext;

                uext = g_malloc0(sizeof(*uext) + ext.len);
                uext->magic = ext.magic;
                uext->len = ext.len;
                QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next);

                ret = bdrv_co_pread(bs->file, offset, uext->len, uext->data, 0);
                if (ret < 0) {
                    error_setg_errno(errp, -ret, "ERROR: unknown extension: "
                                     "Could not read data");
                    return ret;
                }
            }
            break;
        }

        /* Header extension data is padded to a multiple of 8 bytes */
        offset += ((ext.len + 7) & ~7);
    }

    return 0;
}

static void cleanup_unknown_header_ext(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2UnknownHeaderExtension *uext, *next;

    QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) {
        QLIST_REMOVE(uext, next);
        g_free(uext);
    }
}

static void report_unsupported_feature(Error **errp, Qcow2Feature *table,
                                       uint64_t mask)
{
    g_autoptr(GString) features = g_string_sized_new(60);

    while (table && table->name[0] != '\0') {
        if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) {
            if (mask & (1ULL << table->bit)) {
                if (features->len > 0) {
                    g_string_append(features, ", ");
                }
                g_string_append_printf(features, "%.46s", table->name);
                mask &= ~(1ULL << table->bit);
            }
        }
        table++;
    }

    if (mask) {
        if (features->len > 0) {
            g_string_append(features, ", ");
        }
        g_string_append_printf(features,
                               "Unknown incompatible feature: %" PRIx64, mask);
    }

    error_setg(errp, "Unsupported qcow2 feature(s): %s", features->str);
}

/*
 * Sets the dirty bit and flushes afterwards if necessary.
 *
 * The incompatible_features bit is only set if the image file header was
 * updated successfully.  Therefore it is not required to check the return
 * value of this function.
 */
int qcow2_mark_dirty(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t val;
    int ret;

    assert(s->qcow_version >= 3);

    if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
        return 0; /* already dirty */
    }

    val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY);
    ret = bdrv_pwrite_sync(bs->file,
                           offsetof(QCowHeader, incompatible_features),
                           sizeof(val), &val, 0);
    if (ret < 0) {
        return ret;
    }

    /* Only treat image as dirty if the header was updated successfully */
    s->incompatible_features |= QCOW2_INCOMPAT_DIRTY;
    return 0;
}

/*
 * Clears the dirty bit and flushes before if necessary.  Only call this
 * function when there are no pending requests, it does not guard against
 * concurrent requests dirtying the image.
 */
static int qcow2_mark_clean(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;

    if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
        int ret;

        s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY;

        ret = qcow2_flush_caches(bs);
        if (ret < 0) {
            return ret;
        }

        return qcow2_update_header(bs);
    }
    return 0;
}

/*
 * Marks the image as corrupt.
 */
int qcow2_mark_corrupt(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;

    s->incompatible_features |= QCOW2_INCOMPAT_CORRUPT;
    return qcow2_update_header(bs);
}

/*
 * Marks the image as consistent, i.e., unsets the corrupt bit, and flushes
 * before if necessary.
 */
int qcow2_mark_consistent(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;

    if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
        int ret = qcow2_flush_caches(bs);
        if (ret < 0) {
            return ret;
        }

        s->incompatible_features &= ~QCOW2_INCOMPAT_CORRUPT;
        return qcow2_update_header(bs);
    }
    return 0;
}

static void qcow2_add_check_result(BdrvCheckResult *out,
                                   const BdrvCheckResult *src,
                                   bool set_allocation_info)
{
    out->corruptions += src->corruptions;
    out->leaks += src->leaks;
    out->check_errors += src->check_errors;
    out->corruptions_fixed += src->corruptions_fixed;
    out->leaks_fixed += src->leaks_fixed;

    if (set_allocation_info) {
        out->image_end_offset = src->image_end_offset;
        out->bfi = src->bfi;
    }
}

static int coroutine_fn GRAPH_RDLOCK
qcow2_co_check_locked(BlockDriverState *bs, BdrvCheckResult *result,
                      BdrvCheckMode fix)
{
    BdrvCheckResult snapshot_res = {};
    BdrvCheckResult refcount_res = {};
    int ret;

    memset(result, 0, sizeof(*result));

    ret = qcow2_check_read_snapshot_table(bs, &snapshot_res, fix);
    if (ret < 0) {
        qcow2_add_check_result(result, &snapshot_res, false);
        return ret;
    }

    ret = qcow2_check_refcounts(bs, &refcount_res, fix);
    qcow2_add_check_result(result, &refcount_res, true);
    if (ret < 0) {
        qcow2_add_check_result(result, &snapshot_res, false);
        return ret;
    }

    ret = qcow2_check_fix_snapshot_table(bs, &snapshot_res, fix);
    qcow2_add_check_result(result, &snapshot_res, false);
    if (ret < 0) {
        return ret;
    }

    if (fix && result->check_errors == 0 && result->corruptions == 0) {
        ret = qcow2_mark_clean(bs);
        if (ret < 0) {
            return ret;
        }
        return qcow2_mark_consistent(bs);
    }
    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
qcow2_co_check(BlockDriverState *bs, BdrvCheckResult *result,
               BdrvCheckMode fix)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_co_check_locked(bs, result, fix);
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}

int qcow2_validate_table(BlockDriverState *bs, uint64_t offset,
                         uint64_t entries, size_t entry_len,
                         int64_t max_size_bytes, const char *table_name,
                         Error **errp)
{
    BDRVQcow2State *s = bs->opaque;

    if (entries > max_size_bytes / entry_len) {
        error_setg(errp, "%s too large", table_name);
        return -EFBIG;
    }

    /* Use signed INT64_MAX as the maximum even for uint64_t header fields,
     * because values will be passed to qemu functions taking int64_t. */
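    /*
     * The check below is the overflow-safe form of
     * "offset + entries * entry_len > INT64_MAX"; the multiplication itself
     * cannot overflow because the check above bounds it by max_size_bytes.
     */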
    if ((INT64_MAX - entries * entry_len < offset) ||
        (offset_into_cluster(s, offset) != 0)) {
        error_setg(errp, "%s offset invalid", table_name);
        return -EINVAL;
    }

    return 0;
}

static const char *const mutable_opts[] = {
    QCOW2_OPT_LAZY_REFCOUNTS,
    QCOW2_OPT_DISCARD_REQUEST,
    QCOW2_OPT_DISCARD_SNAPSHOT,
    QCOW2_OPT_DISCARD_OTHER,
    QCOW2_OPT_OVERLAP,
    QCOW2_OPT_OVERLAP_TEMPLATE,
    QCOW2_OPT_OVERLAP_MAIN_HEADER,
    QCOW2_OPT_OVERLAP_ACTIVE_L1,
    QCOW2_OPT_OVERLAP_ACTIVE_L2,
    QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
    QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
    QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
    QCOW2_OPT_OVERLAP_INACTIVE_L1,
    QCOW2_OPT_OVERLAP_INACTIVE_L2,
    QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
    QCOW2_OPT_CACHE_SIZE,
    QCOW2_OPT_L2_CACHE_SIZE,
    QCOW2_OPT_L2_CACHE_ENTRY_SIZE,
    QCOW2_OPT_REFCOUNT_CACHE_SIZE,
    QCOW2_OPT_CACHE_CLEAN_INTERVAL,
    NULL
};

static QemuOptsList qcow2_runtime_opts = {
    .name = "qcow2",
    .head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head),
    .desc = {
        {
            .name = QCOW2_OPT_LAZY_REFCOUNTS,
            .type = QEMU_OPT_BOOL,
            .help = "Postpone refcount updates",
        },
        {
            .name = QCOW2_OPT_DISCARD_REQUEST,
            .type = QEMU_OPT_BOOL,
            .help = "Pass guest discard requests to the layer below",
        },
        {
            .name = QCOW2_OPT_DISCARD_SNAPSHOT,
            .type = QEMU_OPT_BOOL,
            .help = "Generate discard requests when snapshot related space "
                    "is freed",
        },
        {
            .name = QCOW2_OPT_DISCARD_OTHER,
            .type = QEMU_OPT_BOOL,
            .help = "Generate discard requests when other clusters are freed",
        },
        {
            .name = QCOW2_OPT_OVERLAP,
            .type = QEMU_OPT_STRING,
            .help = "Selects which overlap checks to perform from a range of "
                    "templates (none, constant, cached, all)",
        },
        {
            .name = QCOW2_OPT_OVERLAP_TEMPLATE,
            .type = QEMU_OPT_STRING,
            .help = "Selects which overlap checks to perform from a range of "
                    "templates (none, constant, cached, all)",
        },
        {
            .name = QCOW2_OPT_OVERLAP_MAIN_HEADER,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the main qcow2 header",
        },
        {
            .name = QCOW2_OPT_OVERLAP_ACTIVE_L1,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the active L1 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_ACTIVE_L2,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into an active L2 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the refcount table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into a refcount block",
        },
        {
            .name = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the snapshot table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_INACTIVE_L1,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into an inactive L1 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_INACTIVE_L2,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into an inactive L2 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the bitmap directory",
        },
        {
            .name = QCOW2_OPT_CACHE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Maximum combined metadata (L2 tables and refcount blocks) "
                    "cache size",
        },
        {
            .name = QCOW2_OPT_L2_CACHE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Maximum L2 table cache size",
        },
        {
            .name = QCOW2_OPT_L2_CACHE_ENTRY_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Size of each entry in the L2 cache",
        },
        {
            .name = QCOW2_OPT_REFCOUNT_CACHE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Maximum refcount block cache size",
        },
        {
            .name = QCOW2_OPT_CACHE_CLEAN_INTERVAL,
            .type = QEMU_OPT_NUMBER,
            .help = "Clean unused cache entries after this time (in seconds)",
        },
        BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.",
            "ID of secret providing qcow2 AES key or LUKS passphrase"),
        { /* end of list */ }
    },
};

static const char *overlap_bool_option_names[QCOW2_OL_MAX_BITNR] = {
    [QCOW2_OL_MAIN_HEADER_BITNR]      = QCOW2_OPT_OVERLAP_MAIN_HEADER,
    [QCOW2_OL_ACTIVE_L1_BITNR]        = QCOW2_OPT_OVERLAP_ACTIVE_L1,
    [QCOW2_OL_ACTIVE_L2_BITNR]        = QCOW2_OPT_OVERLAP_ACTIVE_L2,
    [QCOW2_OL_REFCOUNT_TABLE_BITNR]   = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
    [QCOW2_OL_REFCOUNT_BLOCK_BITNR]   = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
    [QCOW2_OL_SNAPSHOT_TABLE_BITNR]   = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
    [QCOW2_OL_INACTIVE_L1_BITNR]      = QCOW2_OPT_OVERLAP_INACTIVE_L1,
    [QCOW2_OL_INACTIVE_L2_BITNR]      = QCOW2_OPT_OVERLAP_INACTIVE_L2,
    [QCOW2_OL_BITMAP_DIRECTORY_BITNR] = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
};

static void cache_clean_timer_cb(void *opaque)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    qcow2_cache_clean_unused(s->l2_table_cache);
    qcow2_cache_clean_unused(s->refcount_block_cache);
    timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
              (int64_t) s->cache_clean_interval * 1000);
}

static void cache_clean_timer_init(BlockDriverState *bs, AioContext *context)
{
    BDRVQcow2State *s = bs->opaque;
    if (s->cache_clean_interval > 0) {
        s->cache_clean_timer =
            aio_timer_new_with_attrs(context, QEMU_CLOCK_VIRTUAL,
                                     SCALE_MS, QEMU_TIMER_ATTR_EXTERNAL,
                                     cache_clean_timer_cb, bs);
        timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                  (int64_t) s->cache_clean_interval * 1000);
    }
}

static void cache_clean_timer_del(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    if (s->cache_clean_timer) {
        timer_free(s->cache_clean_timer);
        s->cache_clean_timer = NULL;
    }
}

static void qcow2_detach_aio_context(BlockDriverState *bs)
{
    cache_clean_timer_del(bs);
}

static void qcow2_attach_aio_context(BlockDriverState *bs,
                                     AioContext *new_context)
{
    cache_clean_timer_init(bs, new_context);
}

static bool read_cache_sizes(BlockDriverState *bs, QemuOpts *opts,
                             uint64_t *l2_cache_size,
                             uint64_t *l2_cache_entry_size,
                             uint64_t *refcount_cache_size, Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t combined_cache_size, l2_cache_max_setting;
    bool l2_cache_size_set, refcount_cache_size_set, combined_cache_size_set;
    bool l2_cache_entry_size_set;
    int min_refcount_cache = MIN_REFCOUNT_CACHE_SIZE * s->cluster_size;
    uint64_t virtual_disk_size = bs->total_sectors * BDRV_SECTOR_SIZE;
    uint64_t max_l2_entries = DIV_ROUND_UP(virtual_disk_size, s->cluster_size);
    /* An L2 table is always one cluster in size so the max cache size
     * should be a multiple of the cluster size. */
    uint64_t max_l2_cache = ROUND_UP(max_l2_entries * l2_entry_size(s),
                                     s->cluster_size);

    combined_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_CACHE_SIZE);
    l2_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_SIZE);
    refcount_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_REFCOUNT_CACHE_SIZE);
    l2_cache_entry_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE);

    combined_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_CACHE_SIZE, 0);
    l2_cache_max_setting = qemu_opt_get_size(opts, QCOW2_OPT_L2_CACHE_SIZE,
                                             DEFAULT_L2_CACHE_MAX_SIZE);
    *refcount_cache_size = qemu_opt_get_size(opts,
                                             QCOW2_OPT_REFCOUNT_CACHE_SIZE, 0);

    *l2_cache_entry_size = qemu_opt_get_size(
        opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE, s->cluster_size);

    *l2_cache_size = MIN(max_l2_cache, l2_cache_max_setting);

    if (combined_cache_size_set) {
        if (l2_cache_size_set && refcount_cache_size_set) {
            error_setg(errp, QCOW2_OPT_CACHE_SIZE ", " QCOW2_OPT_L2_CACHE_SIZE
                       " and " QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not be set "
                       "at the same time");
            return false;
        } else if (l2_cache_size_set &&
                   (l2_cache_max_setting > combined_cache_size)) {
            error_setg(errp, QCOW2_OPT_L2_CACHE_SIZE " may not exceed "
                       QCOW2_OPT_CACHE_SIZE);
            return false;
        } else if (*refcount_cache_size > combined_cache_size) {
            error_setg(errp, QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not exceed "
                       QCOW2_OPT_CACHE_SIZE);
            return false;
        }

        if (l2_cache_size_set) {
            *refcount_cache_size = combined_cache_size - *l2_cache_size;
        } else if (refcount_cache_size_set) {
            *l2_cache_size = combined_cache_size - *refcount_cache_size;
        } else {
            /* Assign as much memory as possible to the L2 cache, and
             * use the remainder for the refcount cache */
            if (combined_cache_size >= max_l2_cache + min_refcount_cache) {
                *l2_cache_size = max_l2_cache;
                *refcount_cache_size = combined_cache_size - *l2_cache_size;
            } else {
                *refcount_cache_size =
                    MIN(combined_cache_size, min_refcount_cache);
                *l2_cache_size = combined_cache_size - *refcount_cache_size;
            }
        }
    }

    /*
     * If the L2 cache is not enough to cover the whole disk then
     * default to 4KB entries. Smaller entries reduce the cost of
     * loads and evictions and increase I/O performance.
     */
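    /*
     * For example, with 64 KiB clusters a full L2 table is one 64 KiB
     * cluster; using 4 KiB cache entries means a cache miss only has to read
     * back 4 KiB (512 entries at 8 bytes each, assuming no extended L2
     * entries) instead of the whole table.
     */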
    if (*l2_cache_size < max_l2_cache && !l2_cache_entry_size_set) {
        *l2_cache_entry_size = MIN(s->cluster_size, 4096);
    }

    /* l2_cache_size and refcount_cache_size are ensured to have at least
     * their minimum values in qcow2_update_options_prepare() */

    if (*l2_cache_entry_size < (1 << MIN_CLUSTER_BITS) ||
        *l2_cache_entry_size > s->cluster_size ||
        !is_power_of_2(*l2_cache_entry_size)) {
        error_setg(errp, "L2 cache entry size must be a power of two "
                   "between %d and the cluster size (%d)",
                   1 << MIN_CLUSTER_BITS, s->cluster_size);
        return false;
    }

    return true;
}

typedef struct Qcow2ReopenState {
    Qcow2Cache *l2_table_cache;
    Qcow2Cache *refcount_block_cache;
    int l2_slice_size; /* Number of entries in a slice of the L2 table */
    bool use_lazy_refcounts;
    int overlap_check;
    bool discard_passthrough[QCOW2_DISCARD_MAX];
    uint64_t cache_clean_interval;
    QCryptoBlockOpenOptions *crypto_opts; /* Disk encryption runtime options */
} Qcow2ReopenState;

static int qcow2_update_options_prepare(BlockDriverState *bs,
                                        Qcow2ReopenState *r,
                                        QDict *options, int flags,
                                        Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    QemuOpts *opts = NULL;
    const char *opt_overlap_check, *opt_overlap_check_template;
    int overlap_check_template = 0;
    uint64_t l2_cache_size, l2_cache_entry_size, refcount_cache_size;
    int i;
    const char *encryptfmt;
    QDict *encryptopts = NULL;
    int ret;

    qdict_extract_subqdict(options, &encryptopts, "encrypt.");
    encryptfmt = qdict_get_try_str(encryptopts, "format");

    opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort);
    if (!qemu_opts_absorb_qdict(opts, options, errp)) {
        ret = -EINVAL;
        goto fail;
    }

    /* get L2 table/refcount block cache size from command line options */
    if (!read_cache_sizes(bs, opts, &l2_cache_size, &l2_cache_entry_size,
                          &refcount_cache_size, errp)) {
        ret = -EINVAL;
        goto fail;
    }

    l2_cache_size /= l2_cache_entry_size;
    if (l2_cache_size < MIN_L2_CACHE_SIZE) {
        l2_cache_size = MIN_L2_CACHE_SIZE;
    }
    if (l2_cache_size > INT_MAX) {
        error_setg(errp, "L2 cache size too big");
        ret = -EINVAL;
        goto fail;
    }

    refcount_cache_size /= s->cluster_size;
    if (refcount_cache_size < MIN_REFCOUNT_CACHE_SIZE) {
        refcount_cache_size = MIN_REFCOUNT_CACHE_SIZE;
    }
    if (refcount_cache_size > INT_MAX) {
        error_setg(errp, "Refcount cache size too big");
        ret = -EINVAL;
        goto fail;
    }

    /* alloc new L2 table/refcount block cache, flush old one */
    if (s->l2_table_cache) {
        ret = qcow2_cache_flush(bs, s->l2_table_cache);
        if (ret) {
            error_setg_errno(errp, -ret, "Failed to flush the L2 table cache");
            goto fail;
        }
    }

    if (s->refcount_block_cache) {
        ret = qcow2_cache_flush(bs, s->refcount_block_cache);
        if (ret) {
            error_setg_errno(errp, -ret,
                             "Failed to flush the refcount block cache");
            goto fail;
        }
    }

    r->l2_slice_size = l2_cache_entry_size / l2_entry_size(s);
    r->l2_table_cache = qcow2_cache_create(bs, l2_cache_size,
                                           l2_cache_entry_size);
    r->refcount_block_cache = qcow2_cache_create(bs, refcount_cache_size,
                                                 s->cluster_size);
    if (r->l2_table_cache == NULL || r->refcount_block_cache == NULL) {
        error_setg(errp, "Could not allocate metadata caches");
        ret = -ENOMEM;
        goto fail;
    }

    /* New interval for cache cleanup timer */
    r->cache_clean_interval =
        qemu_opt_get_number(opts, QCOW2_OPT_CACHE_CLEAN_INTERVAL,
                            DEFAULT_CACHE_CLEAN_INTERVAL);
#ifndef CONFIG_LINUX
    if (r->cache_clean_interval != 0) {
        error_setg(errp, QCOW2_OPT_CACHE_CLEAN_INTERVAL
                   " not supported on this host");
        ret = -EINVAL;
        goto fail;
    }
#endif
    if (r->cache_clean_interval > UINT_MAX) {
        error_setg(errp, "Cache clean interval too big");
        ret = -EINVAL;
        goto fail;
    }

    /* lazy-refcounts; flush if going from enabled to disabled */
    r->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS,
        (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS));
    if (r->use_lazy_refcounts && s->qcow_version < 3) {
        error_setg(errp, "Lazy refcounts require a qcow2 image with at least "
                   "qemu 1.1 compatibility level");
        ret = -EINVAL;
        goto fail;
    }

    if (s->use_lazy_refcounts && !r->use_lazy_refcounts) {
        ret = qcow2_mark_clean(bs);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Failed to disable lazy refcounts");
            goto fail;
        }
    }

    /* Overlap check options */
    opt_overlap_check = qemu_opt_get(opts, QCOW2_OPT_OVERLAP);
    opt_overlap_check_template = qemu_opt_get(opts, QCOW2_OPT_OVERLAP_TEMPLATE);
    if (opt_overlap_check_template && opt_overlap_check &&
        strcmp(opt_overlap_check_template, opt_overlap_check))
    {
        error_setg(errp, "Conflicting values for qcow2 options '"
                   QCOW2_OPT_OVERLAP "' ('%s') and '" QCOW2_OPT_OVERLAP_TEMPLATE
                   "' ('%s')", opt_overlap_check, opt_overlap_check_template);
        ret = -EINVAL;
        goto fail;
    }
    if (!opt_overlap_check) {
        opt_overlap_check = opt_overlap_check_template ?: "cached";
    }

    if (!strcmp(opt_overlap_check, "none")) {
        overlap_check_template = 0;
    } else if (!strcmp(opt_overlap_check, "constant")) {
        overlap_check_template = QCOW2_OL_CONSTANT;
    } else if (!strcmp(opt_overlap_check, "cached")) {
        overlap_check_template = QCOW2_OL_CACHED;
    } else if (!strcmp(opt_overlap_check, "all")) {
        overlap_check_template = QCOW2_OL_ALL;
    } else {
        error_setg(errp, "Unsupported value '%s' for qcow2 option "
                   "'overlap-check'. Allowed are any of the following: "
                   "none, constant, cached, all", opt_overlap_check);
        ret = -EINVAL;
        goto fail;
    }

    r->overlap_check = 0;
    for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) {
        /* overlap-check defines a template bitmask, but every flag may be
         * overwritten through the associated boolean option */
        r->overlap_check |=
            qemu_opt_get_bool(opts, overlap_bool_option_names[i],
                              overlap_check_template & (1 << i)) << i;
    }

    r->discard_passthrough[QCOW2_DISCARD_NEVER] = false;
    r->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true;
    r->discard_passthrough[QCOW2_DISCARD_REQUEST] =
        qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST,
                          flags & BDRV_O_UNMAP);
    r->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] =
        qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true);
    r->discard_passthrough[QCOW2_DISCARD_OTHER] =
        qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false);

    switch (s->crypt_method_header) {
    case QCOW_CRYPT_NONE:
        if (encryptfmt) {
            error_setg(errp, "No encryption in image header, but options "
                       "specified format '%s'", encryptfmt);
            ret = -EINVAL;
            goto fail;
        }
        break;

    case QCOW_CRYPT_AES:
        if (encryptfmt && !g_str_equal(encryptfmt, "aes")) {
            error_setg(errp,
                       "Header reported 'aes' encryption format but "
                       "options specify '%s'", encryptfmt);
            ret = -EINVAL;
            goto fail;
        }
        qdict_put_str(encryptopts, "format", "qcow");
        r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp);
        if (!r->crypto_opts) {
            ret = -EINVAL;
            goto fail;
        }
        break;

    case QCOW_CRYPT_LUKS:
        if (encryptfmt && !g_str_equal(encryptfmt, "luks")) {
            error_setg(errp,
                       "Header reported 'luks' encryption format but "
                       "options specify '%s'", encryptfmt);
            ret = -EINVAL;
            goto fail;
        }
        qdict_put_str(encryptopts, "format", "luks");
        r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp);
        if (!r->crypto_opts) {
            ret = -EINVAL;
            goto fail;
        }
        break;

    default:
        error_setg(errp, "Unsupported encryption method %d",
                   s->crypt_method_header);
        ret = -EINVAL;
        goto fail;
    }

    ret = 0;
fail:
    qobject_unref(encryptopts);
    qemu_opts_del(opts);
    opts = NULL;
    return ret;
}

static void qcow2_update_options_commit(BlockDriverState *bs,
                                        Qcow2ReopenState *r)
{
    BDRVQcow2State *s = bs->opaque;
    int i;

    if (s->l2_table_cache) {
        qcow2_cache_destroy(s->l2_table_cache);
    }
    if (s->refcount_block_cache) {
        qcow2_cache_destroy(s->refcount_block_cache);
    }
    s->l2_table_cache = r->l2_table_cache;
    s->refcount_block_cache = r->refcount_block_cache;
    s->l2_slice_size = r->l2_slice_size;

    s->overlap_check = r->overlap_check;
    s->use_lazy_refcounts = r->use_lazy_refcounts;

    for (i = 0; i < QCOW2_DISCARD_MAX; i++) {
        s->discard_passthrough[i] = r->discard_passthrough[i];
    }

    if (s->cache_clean_interval != r->cache_clean_interval) {
        cache_clean_timer_del(bs);
        s->cache_clean_interval = r->cache_clean_interval;
        cache_clean_timer_init(bs, bdrv_get_aio_context(bs));
    }

    qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
    s->crypto_opts = r->crypto_opts;
}
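
/*
 * Undo qcow2_update_options_prepare(): the caches and crypto options built
 * for the new state were never installed into BDRVQcow2State, so simply free
 * them again.
 */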
static void qcow2_update_options_abort(BlockDriverState *bs,
                                       Qcow2ReopenState *r)
{
    if (r->l2_table_cache) {
        qcow2_cache_destroy(r->l2_table_cache);
    }
    if (r->refcount_block_cache) {
        qcow2_cache_destroy(r->refcount_block_cache);
    }
    qapi_free_QCryptoBlockOpenOptions(r->crypto_opts);
}

static int coroutine_fn
qcow2_update_options(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp)
{
    Qcow2ReopenState r = {};
    int ret;

    ret = qcow2_update_options_prepare(bs, &r, options, flags, errp);
    if (ret >= 0) {
        qcow2_update_options_commit(bs, &r);
    } else {
        qcow2_update_options_abort(bs, &r);
    }

    return ret;
}

static int validate_compression_type(BDRVQcow2State *s, Error **errp)
{
    switch (s->compression_type) {
    case QCOW2_COMPRESSION_TYPE_ZLIB:
#ifdef CONFIG_ZSTD
    case QCOW2_COMPRESSION_TYPE_ZSTD:
#endif
        break;

    default:
        error_setg(errp, "qcow2: unknown compression type: %u",
                   s->compression_type);
        return -ENOTSUP;
    }

    /*
     * if the compression type differs from QCOW2_COMPRESSION_TYPE_ZLIB
     * the incompatible feature flag must be set
     */
    if (s->compression_type == QCOW2_COMPRESSION_TYPE_ZLIB) {
        if (s->incompatible_features & QCOW2_INCOMPAT_COMPRESSION) {
            error_setg(errp, "qcow2: Compression type incompatible feature "
                       "bit must not be set");
            return -EINVAL;
        }
    } else {
        if (!(s->incompatible_features & QCOW2_INCOMPAT_COMPRESSION)) {
            error_setg(errp, "qcow2: Compression type incompatible feature "
                       "bit must be set");
            return -EINVAL;
        }
    }

    return 0;
}

/* Called with s->lock held.  */
static int coroutine_fn GRAPH_RDLOCK
qcow2_do_open(BlockDriverState *bs, QDict *options, int flags,
              bool open_data_file, Error **errp)
{
    ERRP_GUARD();
    BDRVQcow2State *s = bs->opaque;
    unsigned int len, i;
    int ret = 0;
    QCowHeader header;
    uint64_t ext_end;
    uint64_t l1_vm_state_index;
    bool update_header = false;

    ret = bdrv_co_pread(bs->file, 0, sizeof(header), &header, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read qcow2 header");
        goto fail;
    }
    header.magic = be32_to_cpu(header.magic);
    header.version = be32_to_cpu(header.version);
    header.backing_file_offset = be64_to_cpu(header.backing_file_offset);
    header.backing_file_size = be32_to_cpu(header.backing_file_size);
    header.size = be64_to_cpu(header.size);
    header.cluster_bits = be32_to_cpu(header.cluster_bits);
    header.crypt_method = be32_to_cpu(header.crypt_method);
    header.l1_table_offset = be64_to_cpu(header.l1_table_offset);
    header.l1_size = be32_to_cpu(header.l1_size);
    header.refcount_table_offset = be64_to_cpu(header.refcount_table_offset);
    header.refcount_table_clusters =
        be32_to_cpu(header.refcount_table_clusters);
    header.snapshots_offset = be64_to_cpu(header.snapshots_offset);
    header.nb_snapshots = be32_to_cpu(header.nb_snapshots);

    if (header.magic != QCOW_MAGIC) {
        error_setg(errp, "Image is not in qcow2 format");
        ret = -EINVAL;
        goto fail;
    }
    if (header.version < 2 || header.version > 3) {
        error_setg(errp, "Unsupported qcow2 version %" PRIu32, header.version);
        ret = -ENOTSUP;
        goto fail;
    }

    s->qcow_version = header.version;

    /* Initialise cluster size */
    if (header.cluster_bits < MIN_CLUSTER_BITS ||
        header.cluster_bits > MAX_CLUSTER_BITS) {
        error_setg(errp, "Unsupported cluster size: 2^%" PRIu32,
                   header.cluster_bits);
        ret = -EINVAL;
        goto fail;
    }

    s->cluster_bits = header.cluster_bits;
    s->cluster_size = 1 << s->cluster_bits;

    /* Initialise version 3 header fields */
    if (header.version == 2) {
        header.incompatible_features = 0;
        header.compatible_features = 0;
        header.autoclear_features = 0;
        header.refcount_order = 4;
        header.header_length = 72;
    } else {
        header.incompatible_features =
            be64_to_cpu(header.incompatible_features);
        header.compatible_features = be64_to_cpu(header.compatible_features);
        header.autoclear_features = be64_to_cpu(header.autoclear_features);
        header.refcount_order = be32_to_cpu(header.refcount_order);
        header.header_length = be32_to_cpu(header.header_length);

        if (header.header_length < 104) {
            error_setg(errp, "qcow2 header too short");
            ret = -EINVAL;
            goto fail;
        }
    }

    if (header.header_length > s->cluster_size) {
        error_setg(errp, "qcow2 header exceeds cluster size");
        ret = -EINVAL;
        goto fail;
    }

    if (header.header_length > sizeof(header)) {
        s->unknown_header_fields_size = header.header_length - sizeof(header);
        s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);
        ret = bdrv_co_pread(bs->file, sizeof(header),
                            s->unknown_header_fields_size,
                            s->unknown_header_fields, 0);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not read unknown qcow2 header "
                             "fields");
            goto fail;
        }
    }

    if (header.backing_file_offset > s->cluster_size) {
        error_setg(errp, "Invalid backing file offset");
        ret = -EINVAL;
        goto fail;
    }

    if (header.backing_file_offset) {
        ext_end = header.backing_file_offset;
    } else {
        ext_end = 1 << header.cluster_bits;
    }

    /* Handle feature bits */
    s->incompatible_features = header.incompatible_features;
    s->compatible_features = header.compatible_features;
    s->autoclear_features = header.autoclear_features;

    /*
     * Handle compression type
     * Older qcow2 images don't contain the compression type header.
     * Distinguish them by the header length and use
     * the only valid (default) compression type in that case
     */
    if (header.header_length > offsetof(QCowHeader, compression_type)) {
        s->compression_type = header.compression_type;
    } else {
        s->compression_type = QCOW2_COMPRESSION_TYPE_ZLIB;
    }

    ret = validate_compression_type(s, errp);
    if (ret) {
        goto fail;
    }

    if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) {
        void *feature_table = NULL;
        qcow2_read_extensions(bs, header.header_length, ext_end,
                              &feature_table, flags, NULL, NULL);
        report_unsupported_feature(errp, feature_table,
                                   s->incompatible_features &
                                   ~QCOW2_INCOMPAT_MASK);
        ret = -ENOTSUP;
        g_free(feature_table);
        goto fail;
    }

    if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
        /* Corrupt images may not be written to unless they are being repaired
         */
        if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) {
            error_setg(errp, "qcow2: Image is corrupt; cannot be opened "
                       "read/write");
            ret = -EACCES;
            goto fail;
        }
    }
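
    /*
     * Subcluster granularity: with the extended L2 entries feature, each
     * cluster is subdivided into QCOW_EXTL2_SUBCLUSTERS_PER_CLUSTER (32)
     * subclusters that can be allocated or zeroed individually; otherwise
     * the "subcluster" is simply the whole cluster.
     */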
    s->subclusters_per_cluster =
        has_subclusters(s) ? QCOW_EXTL2_SUBCLUSTERS_PER_CLUSTER : 1;
    s->subcluster_size = s->cluster_size / s->subclusters_per_cluster;
    s->subcluster_bits = ctz32(s->subcluster_size);

    if (s->subcluster_size < (1 << MIN_CLUSTER_BITS)) {
        error_setg(errp, "Unsupported subcluster size: %d", s->subcluster_size);
        ret = -EINVAL;
        goto fail;
    }

    /* Check support for various header values */
    if (header.refcount_order > 6) {
        error_setg(errp, "Reference count entry width too large; may not "
                   "exceed 64 bits");
        ret = -EINVAL;
        goto fail;
    }
    s->refcount_order = header.refcount_order;
    s->refcount_bits = 1 << s->refcount_order;
    /* refcount_max = 2^refcount_bits - 1, computed without shifting by 64 */
    s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1);
    s->refcount_max += s->refcount_max - 1;

    s->crypt_method_header = header.crypt_method;
    if (s->crypt_method_header) {
        if (bdrv_uses_whitelist() &&
            s->crypt_method_header == QCOW_CRYPT_AES) {
            error_setg(errp,
                       "Use of AES-CBC encrypted qcow2 images is no longer "
                       "supported in system emulators");
            error_append_hint(errp,
                              "You can use 'qemu-img convert' to convert your "
                              "image to an alternative supported format, such "
                              "as unencrypted qcow2, or raw with the LUKS "
                              "format instead.\n");
            ret = -ENOSYS;
            goto fail;
        }

        if (s->crypt_method_header == QCOW_CRYPT_AES) {
            s->crypt_physical_offset = false;
        } else {
            /* Assuming LUKS and any future crypt methods we
             * add will all use physical offsets, due to the
             * fact that the alternative is insecure...  */
            s->crypt_physical_offset = true;
        }

        bs->encrypted = true;
    }

    s->l2_bits = s->cluster_bits - ctz32(l2_entry_size(s));
    s->l2_size = 1 << s->l2_bits;
    /* 2^(s->refcount_order - 3) is the refcount width in bytes */
    s->refcount_block_bits = s->cluster_bits - (s->refcount_order - 3);
    s->refcount_block_size = 1 << s->refcount_block_bits;
    bs->total_sectors = header.size / BDRV_SECTOR_SIZE;
    s->csize_shift = (62 - (s->cluster_bits - 8));
    s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
    s->cluster_offset_mask = (1LL << s->csize_shift) - 1;

    s->refcount_table_offset = header.refcount_table_offset;
    s->refcount_table_size =
        header.refcount_table_clusters << (s->cluster_bits - 3);

    if (header.refcount_table_clusters == 0 && !(flags & BDRV_O_CHECK)) {
        error_setg(errp, "Image does not contain a reference count table");
        ret = -EINVAL;
        goto fail;
    }

    ret = qcow2_validate_table(bs, s->refcount_table_offset,
                               header.refcount_table_clusters,
                               s->cluster_size, QCOW_MAX_REFTABLE_SIZE,
                               "Reference count table", errp);
    if (ret < 0) {
        goto fail;
    }

    if (!(flags & BDRV_O_CHECK)) {
        /*
         * The total size in bytes of the snapshot table is checked in
         * qcow2_read_snapshots() because the size of each snapshot is
         * variable and we don't know it yet.
         * Here we only check the offset and number of snapshots.
         */
        ret = qcow2_validate_table(bs, header.snapshots_offset,
                                   header.nb_snapshots,
                                   sizeof(QCowSnapshotHeader),
                                   sizeof(QCowSnapshotHeader) *
                                       QCOW_MAX_SNAPSHOTS,
                                   "Snapshot table", errp);
        if (ret < 0) {
            goto fail;
        }
    }

    /* read the level 1 table */
    ret = qcow2_validate_table(bs, header.l1_table_offset,
                               header.l1_size, L1E_SIZE,
                               QCOW_MAX_L1_SIZE, "Active L1 table", errp);
    if (ret < 0) {
        goto fail;
    }
    s->l1_size = header.l1_size;
    s->l1_table_offset = header.l1_table_offset;

    l1_vm_state_index = size_to_l1(s, header.size);
    if (l1_vm_state_index > INT_MAX) {
        error_setg(errp, "Image is too big");
        ret = -EFBIG;
        goto fail;
    }
    s->l1_vm_state_index = l1_vm_state_index;

    /* the L1 table must contain at least enough entries to put
       header.size bytes */
    if (s->l1_size < s->l1_vm_state_index) {
        error_setg(errp, "L1 table is too small");
        ret = -EINVAL;
        goto fail;
    }

    if (s->l1_size > 0) {
        s->l1_table = qemu_try_blockalign(bs->file->bs, s->l1_size * L1E_SIZE);
        if (s->l1_table == NULL) {
            error_setg(errp, "Could not allocate L1 table");
            ret = -ENOMEM;
            goto fail;
        }
        ret = bdrv_co_pread(bs->file, s->l1_table_offset, s->l1_size * L1E_SIZE,
                            s->l1_table, 0);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not read L1 table");
            goto fail;
        }
        for (i = 0; i < s->l1_size; i++) {
            s->l1_table[i] = be64_to_cpu(s->l1_table[i]);
        }
    }

    /* Parse driver-specific options */
    ret = qcow2_update_options(bs, options, flags, errp);
    if (ret < 0) {
        goto fail;
    }

    s->flags = flags;

    ret = qcow2_refcount_init(bs);
    if (ret != 0) {
        error_setg_errno(errp, -ret, "Could not initialize refcount handling");
        goto fail;
    }

    QLIST_INIT(&s->cluster_allocs);
    QTAILQ_INIT(&s->discards);

    /* read qcow2 extensions */
    if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL,
                              flags, &update_header, errp)) {
        ret = -EINVAL;
        goto fail;
    }

    if (open_data_file) {
        /* Open external data file */
        s->data_file = bdrv_co_open_child(NULL, options, "data-file", bs,
                                          &child_of_bds, BDRV_CHILD_DATA,
                                          true, errp);
        if (*errp) {
            ret = -EINVAL;
            goto fail;
        }

        if (s->incompatible_features & QCOW2_INCOMPAT_DATA_FILE) {
            if (!s->data_file && s->image_data_file) {
                s->data_file = bdrv_co_open_child(s->image_data_file, options,
                                                  "data-file", bs,
                                                  &child_of_bds,
                                                  BDRV_CHILD_DATA, false, errp);
                if (!s->data_file) {
                    ret = -EINVAL;
                    goto fail;
                }
            }
            if (!s->data_file) {
                error_setg(errp, "'data-file' is required for this image");
                ret = -EINVAL;
                goto fail;
            }

            /* No data here */
            bs->file->role &= ~BDRV_CHILD_DATA;

            /* Must succeed because we have given up permissions if anything */
            bdrv_child_refresh_perms(bs, bs->file, &error_abort);
        } else {
            if (s->data_file) {
                error_setg(errp, "'data-file' can only be set for images with "
                           "an external data file");
                ret = -EINVAL;
                goto fail;
            }

            s->data_file = bs->file;

            if (data_file_is_raw(bs)) {
                error_setg(errp, "data-file-raw requires a data file");
                ret = -EINVAL;
                goto fail;
            }
        }
    }

    /* qcow2_read_extension may have set up the crypto
     * context if the crypt method needs a header region; some methods
     * don't need header extensions, so we must check here.
     */
    if (s->crypt_method_header && !s->crypto) {
        if (s->crypt_method_header == QCOW_CRYPT_AES) {
            unsigned int cflags = 0;
            if (flags & BDRV_O_NO_IO) {
                cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
            }
            s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
                                           NULL, NULL, cflags,
                                           QCOW2_MAX_THREADS, errp);
            if (!s->crypto) {
                ret = -EINVAL;
                goto fail;
            }
        } else if (!(flags & BDRV_O_NO_IO)) {
            error_setg(errp, "Missing CRYPTO header for crypt method %d",
                       s->crypt_method_header);
            ret = -EINVAL;
            goto fail;
        }
    }

    /* read the backing file name */
    if (header.backing_file_offset != 0) {
        len = header.backing_file_size;
        if (len > MIN(1023, s->cluster_size - header.backing_file_offset) ||
            len >= sizeof(bs->backing_file)) {
            error_setg(errp, "Backing file name too long");
            ret = -EINVAL;
            goto fail;
        }

        s->image_backing_file = g_malloc(len + 1);
        ret = bdrv_co_pread(bs->file, header.backing_file_offset, len,
                            s->image_backing_file, 0);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not read backing file name");
            goto fail;
        }
        s->image_backing_file[len] = '\0';

        /*
         * Update only when something has changed.  This function is called by
         * qcow2_co_invalidate_cache(), and we do not want to reset
         * auto_backing_file unless necessary.
         */
        if (!g_str_equal(s->image_backing_file, bs->backing_file)) {
            pstrcpy(bs->backing_file, sizeof(bs->backing_file),
                    s->image_backing_file);
            pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file),
                    s->image_backing_file);
        }
    }

    /*
     * Internal snapshots; skip reading them in check mode, because
     * we do not need them then, and we do not want to abort because
     * of a broken table.
     */
    if (!(flags & BDRV_O_CHECK)) {
        s->snapshots_offset = header.snapshots_offset;
        s->nb_snapshots = header.nb_snapshots;

        ret = qcow2_read_snapshots(bs, errp);
        if (ret < 0) {
            goto fail;
        }
    }

    /* Clear unknown autoclear feature bits */
    update_header |= s->autoclear_features & ~QCOW2_AUTOCLEAR_MASK;
    update_header = update_header && bdrv_is_writable(bs);
    if (update_header) {
        s->autoclear_features &= QCOW2_AUTOCLEAR_MASK;
    }

    /* == Handle persistent dirty bitmaps ==
     *
     * We want to load dirty bitmaps in three cases:
     *
     * 1. Normal open of the disk in active mode, not related to invalidation
     *    after migration.
     *
     * 2. Invalidation of the target vm after the pre-copy phase of migration,
     *    if bitmaps are _not_ migrating through the migration channel, i.e.
     *    the 'dirty-bitmaps' capability is disabled.
     *
     * 3. Invalidation of the source vm after a failed or canceled migration.
     *    This is a very interesting case. There are two possible types of
     *    bitmaps:
     *
     *    A. Stored on inactivation and removed. They should be loaded from
     *       the image.
     *
     *    B. Not stored: not-persistent bitmaps and bitmaps migrated through
     *       the migration channel (with the dirty-bitmaps capability).
     *
     *    On the other hand, there are two possible sub-cases:
     *
     *    3.1 The disk was changed by somebody else while we were inactive.
     *        In this case all in-RAM dirty bitmaps (both persistent and not)
     *        are definitely invalid, and we don't have any method to
     *        determine this.
     *
     *        The simple and safe thing is to just drop all the bitmaps of
     *        type B on inactivation. But in this case we lose bitmaps in
     *        valid 4.2 case.
     *
     *        On the other hand, resuming the source vm after the disk was
     *        already changed is a bad thing anyway: not only bitmaps, the
     *        whole vm state is out of sync with the disk.
     *
     *        This means that a user or management tool who for some reason
     *        decided to resume the source vm after the disk was already
     *        changed by the target vm should at least drop all dirty bitmaps
     *        by hand.
     *
     *        So, we can ignore this case for now, but TODO: a "generation"
     *        extension for qcow2, to determine that the image was changed
     *        after the last inactivation. And if it was changed, we will drop
     *        (or at least mark as 'invalid') all the bitmaps of type B, both
     *        persistent and not.
     *
     *    3.2 The disk was _not_ changed while we were inactive. Bitmaps may
     *        have been saved to disk ('dirty-bitmaps' capability disabled) or
     *        not saved ('dirty-bitmaps' capability enabled), but we don't
     *        need to care: let's load bitmaps as always: stored bitmaps will
     *        be loaded, and not stored ones have the flag IN_USE=1 in the
     *        image and will be skipped on loading.
     *
     * One remaining possible case when we don't want to load bitmaps:
     *
     * 4. Open the disk in inactive mode in the target vm (bitmaps are
     *    migrating or will be loaded on invalidation, no need to try loading
     *    them before).
     */

    if (!(bdrv_get_flags(bs) & BDRV_O_INACTIVE)) {
        /* It's case 1, 2 or 3.2. Or 3.1 which is a BUG in the management
         * layer. */
        bool header_updated;
        if (!qcow2_load_dirty_bitmaps(bs, &header_updated, errp)) {
            ret = -EINVAL;
            goto fail;
        }

        update_header = update_header && !header_updated;
    }

    if (update_header) {
        ret = qcow2_update_header(bs);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not update qcow2 header");
            goto fail;
        }
    }
1826 BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK : 0; 1827 bs->supported_truncate_flags = BDRV_REQ_ZERO_WRITE; 1828 1829 /* Repair image if dirty */ 1830 if (!(flags & BDRV_O_CHECK) && bdrv_is_writable(bs) && 1831 (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) { 1832 BdrvCheckResult result = {0}; 1833 1834 ret = qcow2_co_check_locked(bs, &result, 1835 BDRV_FIX_ERRORS | BDRV_FIX_LEAKS); 1836 if (ret < 0 || result.check_errors) { 1837 if (ret >= 0) { 1838 ret = -EIO; 1839 } 1840 error_setg_errno(errp, -ret, "Could not repair dirty image"); 1841 goto fail; 1842 } 1843 } 1844 1845 #ifdef DEBUG_ALLOC 1846 { 1847 BdrvCheckResult result = {0}; 1848 qcow2_check_refcounts(bs, &result, 0); 1849 } 1850 #endif 1851 1852 qemu_co_queue_init(&s->thread_task_queue); 1853 1854 return ret; 1855 1856 fail: 1857 g_free(s->image_data_file); 1858 if (open_data_file && has_data_file(bs)) { 1859 bdrv_unref_child(bs, s->data_file); 1860 s->data_file = NULL; 1861 } 1862 g_free(s->unknown_header_fields); 1863 cleanup_unknown_header_ext(bs); 1864 qcow2_free_snapshots(bs); 1865 qcow2_refcount_close(bs); 1866 qemu_vfree(s->l1_table); 1867 /* else pre-write overlap checks in cache_destroy may crash */ 1868 s->l1_table = NULL; 1869 cache_clean_timer_del(bs); 1870 if (s->l2_table_cache) { 1871 qcow2_cache_destroy(s->l2_table_cache); 1872 } 1873 if (s->refcount_block_cache) { 1874 qcow2_cache_destroy(s->refcount_block_cache); 1875 } 1876 qcrypto_block_free(s->crypto); 1877 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 1878 return ret; 1879 } 1880 1881 typedef struct QCow2OpenCo { 1882 BlockDriverState *bs; 1883 QDict *options; 1884 int flags; 1885 Error **errp; 1886 int ret; 1887 } QCow2OpenCo; 1888 1889 static void coroutine_fn qcow2_open_entry(void *opaque) 1890 { 1891 QCow2OpenCo *qoc = opaque; 1892 BDRVQcow2State *s = qoc->bs->opaque; 1893 1894 GRAPH_RDLOCK_GUARD(); 1895 1896 qemu_co_mutex_lock(&s->lock); 1897 qoc->ret = qcow2_do_open(qoc->bs, qoc->options, qoc->flags, true, 1898 qoc->errp); 1899 qemu_co_mutex_unlock(&s->lock); 1900 } 1901 1902 static int qcow2_open(BlockDriverState *bs, QDict *options, int flags, 1903 Error **errp) 1904 { 1905 BDRVQcow2State *s = bs->opaque; 1906 QCow2OpenCo qoc = { 1907 .bs = bs, 1908 .options = options, 1909 .flags = flags, 1910 .errp = errp, 1911 .ret = -EINPROGRESS 1912 }; 1913 int ret; 1914 1915 ret = bdrv_open_file_child(NULL, options, "file", bs, errp); 1916 if (ret < 0) { 1917 return ret; 1918 } 1919 1920 /* Initialise locks */ 1921 qemu_co_mutex_init(&s->lock); 1922 1923 assert(!qemu_in_coroutine()); 1924 assert(qemu_get_current_aio_context() == qemu_get_aio_context()); 1925 qemu_coroutine_enter(qemu_coroutine_create(qcow2_open_entry, &qoc)); 1926 BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS); 1927 1928 return qoc.ret; 1929 } 1930 1931 static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp) 1932 { 1933 BDRVQcow2State *s = bs->opaque; 1934 1935 if (bs->encrypted) { 1936 /* Encryption works on a sector granularity */ 1937 bs->bl.request_alignment = qcrypto_block_get_sector_size(s->crypto); 1938 } 1939 bs->bl.pwrite_zeroes_alignment = s->subcluster_size; 1940 bs->bl.pdiscard_alignment = s->cluster_size; 1941 } 1942 1943 static int qcow2_reopen_prepare(BDRVReopenState *state, 1944 BlockReopenQueue *queue, Error **errp) 1945 { 1946 BDRVQcow2State *s = state->bs->opaque; 1947 Qcow2ReopenState *r; 1948 int ret; 1949 1950 r = g_new0(Qcow2ReopenState, 1); 1951 state->opaque = r; 1952 1953 ret = qcow2_update_options_prepare(state->bs, r, state->options, 1954 
state->flags, errp); 1955 if (ret < 0) { 1956 goto fail; 1957 } 1958 1959 /* We need to write out any unwritten data if we reopen read-only. */ 1960 if ((state->flags & BDRV_O_RDWR) == 0) { 1961 ret = qcow2_reopen_bitmaps_ro(state->bs, errp); 1962 if (ret < 0) { 1963 goto fail; 1964 } 1965 1966 ret = bdrv_flush(state->bs); 1967 if (ret < 0) { 1968 goto fail; 1969 } 1970 1971 ret = qcow2_mark_clean(state->bs); 1972 if (ret < 0) { 1973 goto fail; 1974 } 1975 } 1976 1977 /* 1978 * Without an external data file, s->data_file points to the same BdrvChild 1979 * as bs->file. It needs to be resynced after reopen because bs->file may 1980 * be changed. We can't use it in the meantime. 1981 */ 1982 if (!has_data_file(state->bs)) { 1983 assert(s->data_file == state->bs->file); 1984 s->data_file = NULL; 1985 } 1986 1987 return 0; 1988 1989 fail: 1990 qcow2_update_options_abort(state->bs, r); 1991 g_free(r); 1992 return ret; 1993 } 1994 1995 static void qcow2_reopen_commit(BDRVReopenState *state) 1996 { 1997 BDRVQcow2State *s = state->bs->opaque; 1998 1999 qcow2_update_options_commit(state->bs, state->opaque); 2000 if (!s->data_file) { 2001 /* 2002 * If we don't have an external data file, s->data_file was cleared by 2003 * qcow2_reopen_prepare() and needs to be updated. 2004 */ 2005 s->data_file = state->bs->file; 2006 } 2007 g_free(state->opaque); 2008 } 2009 2010 static void qcow2_reopen_commit_post(BDRVReopenState *state) 2011 { 2012 if (state->flags & BDRV_O_RDWR) { 2013 Error *local_err = NULL; 2014 2015 if (qcow2_reopen_bitmaps_rw(state->bs, &local_err) < 0) { 2016 /* 2017 * This is not fatal, bitmaps just left read-only, so all following 2018 * writes will fail. User can remove read-only bitmaps to unblock 2019 * writes or retry reopen. 2020 */ 2021 error_reportf_err(local_err, 2022 "%s: Failed to make dirty bitmaps writable: ", 2023 bdrv_get_node_name(state->bs)); 2024 } 2025 } 2026 } 2027 2028 static void qcow2_reopen_abort(BDRVReopenState *state) 2029 { 2030 BDRVQcow2State *s = state->bs->opaque; 2031 2032 if (!s->data_file) { 2033 /* 2034 * If we don't have an external data file, s->data_file was cleared by 2035 * qcow2_reopen_prepare() and needs to be restored. 
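     *
     * (This mirrors qcow2_reopen_commit(): whether the reopen is committed
     * or aborted, s->data_file must point at bs->file again afterwards.)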
2036 */ 2037 s->data_file = state->bs->file; 2038 } 2039 qcow2_update_options_abort(state->bs, state->opaque); 2040 g_free(state->opaque); 2041 } 2042 2043 static void qcow2_join_options(QDict *options, QDict *old_options) 2044 { 2045 bool has_new_overlap_template = 2046 qdict_haskey(options, QCOW2_OPT_OVERLAP) || 2047 qdict_haskey(options, QCOW2_OPT_OVERLAP_TEMPLATE); 2048 bool has_new_total_cache_size = 2049 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE); 2050 bool has_all_cache_options; 2051 2052 /* New overlap template overrides all old overlap options */ 2053 if (has_new_overlap_template) { 2054 qdict_del(old_options, QCOW2_OPT_OVERLAP); 2055 qdict_del(old_options, QCOW2_OPT_OVERLAP_TEMPLATE); 2056 qdict_del(old_options, QCOW2_OPT_OVERLAP_MAIN_HEADER); 2057 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L1); 2058 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L2); 2059 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_TABLE); 2060 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK); 2061 qdict_del(old_options, QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE); 2062 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L1); 2063 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L2); 2064 } 2065 2066 /* New total cache size overrides all old options */ 2067 if (qdict_haskey(options, QCOW2_OPT_CACHE_SIZE)) { 2068 qdict_del(old_options, QCOW2_OPT_L2_CACHE_SIZE); 2069 qdict_del(old_options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 2070 } 2071 2072 qdict_join(options, old_options, false); 2073 2074 /* 2075 * If after merging all cache size options are set, an old total size is 2076 * overwritten. Do keep all options, however, if all three are new. The 2077 * resulting error message is what we want to happen. 2078 */ 2079 has_all_cache_options = 2080 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE) || 2081 qdict_haskey(options, QCOW2_OPT_L2_CACHE_SIZE) || 2082 qdict_haskey(options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 2083 2084 if (has_all_cache_options && !has_new_total_cache_size) { 2085 qdict_del(options, QCOW2_OPT_CACHE_SIZE); 2086 } 2087 } 2088 2089 static int coroutine_fn GRAPH_RDLOCK 2090 qcow2_co_block_status(BlockDriverState *bs, bool want_zero, int64_t offset, 2091 int64_t count, int64_t *pnum, int64_t *map, 2092 BlockDriverState **file) 2093 { 2094 BDRVQcow2State *s = bs->opaque; 2095 uint64_t host_offset; 2096 unsigned int bytes; 2097 QCow2SubclusterType type; 2098 int ret, status = 0; 2099 2100 qemu_co_mutex_lock(&s->lock); 2101 2102 if (!s->metadata_preallocation_checked) { 2103 ret = qcow2_detect_metadata_preallocation(bs); 2104 s->metadata_preallocation = (ret == 1); 2105 s->metadata_preallocation_checked = true; 2106 } 2107 2108 bytes = MIN(INT_MAX, count); 2109 ret = qcow2_get_host_offset(bs, offset, &bytes, &host_offset, &type); 2110 qemu_co_mutex_unlock(&s->lock); 2111 if (ret < 0) { 2112 return ret; 2113 } 2114 2115 *pnum = bytes; 2116 2117 if ((type == QCOW2_SUBCLUSTER_NORMAL || 2118 type == QCOW2_SUBCLUSTER_ZERO_ALLOC || 2119 type == QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC) && !s->crypto) { 2120 *map = host_offset; 2121 *file = s->data_file->bs; 2122 status |= BDRV_BLOCK_OFFSET_VALID; 2123 } 2124 if (type == QCOW2_SUBCLUSTER_ZERO_PLAIN || 2125 type == QCOW2_SUBCLUSTER_ZERO_ALLOC) { 2126 status |= BDRV_BLOCK_ZERO; 2127 } else if (type != QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN && 2128 type != QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC) { 2129 status |= BDRV_BLOCK_DATA; 2130 } 2131 if (s->metadata_preallocation && (status & BDRV_BLOCK_DATA) && 2132 (status & BDRV_BLOCK_OFFSET_VALID)) 2133 { 2134 status |= 
BDRV_BLOCK_RECURSE; 2135 } 2136 return status; 2137 } 2138 2139 static int coroutine_fn GRAPH_RDLOCK 2140 qcow2_handle_l2meta(BlockDriverState *bs, QCowL2Meta **pl2meta, bool link_l2) 2141 { 2142 int ret = 0; 2143 QCowL2Meta *l2meta = *pl2meta; 2144 2145 while (l2meta != NULL) { 2146 QCowL2Meta *next; 2147 2148 if (link_l2) { 2149 ret = qcow2_alloc_cluster_link_l2(bs, l2meta); 2150 if (ret) { 2151 goto out; 2152 } 2153 } else { 2154 qcow2_alloc_cluster_abort(bs, l2meta); 2155 } 2156 2157 /* Take the request off the list of running requests */ 2158 QLIST_REMOVE(l2meta, next_in_flight); 2159 2160 qemu_co_queue_restart_all(&l2meta->dependent_requests); 2161 2162 next = l2meta->next; 2163 g_free(l2meta); 2164 l2meta = next; 2165 } 2166 out: 2167 *pl2meta = l2meta; 2168 return ret; 2169 } 2170 2171 static int coroutine_fn GRAPH_RDLOCK 2172 qcow2_co_preadv_encrypted(BlockDriverState *bs, 2173 uint64_t host_offset, 2174 uint64_t offset, 2175 uint64_t bytes, 2176 QEMUIOVector *qiov, 2177 uint64_t qiov_offset) 2178 { 2179 int ret; 2180 BDRVQcow2State *s = bs->opaque; 2181 uint8_t *buf; 2182 2183 assert(bs->encrypted && s->crypto); 2184 assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 2185 2186 /* 2187 * For encrypted images, read everything into a temporary 2188 * contiguous buffer on which the AES functions can work. 2189 * Also, decryption in a separate buffer is better as it 2190 * prevents the guest from learning information about the 2191 * encrypted nature of the virtual disk. 2192 */ 2193 2194 buf = qemu_try_blockalign(s->data_file->bs, bytes); 2195 if (buf == NULL) { 2196 return -ENOMEM; 2197 } 2198 2199 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); 2200 ret = bdrv_co_pread(s->data_file, host_offset, bytes, buf, 0); 2201 if (ret < 0) { 2202 goto fail; 2203 } 2204 2205 if (qcow2_co_decrypt(bs, host_offset, offset, buf, bytes) < 0) 2206 { 2207 ret = -EIO; 2208 goto fail; 2209 } 2210 qemu_iovec_from_buf(qiov, qiov_offset, buf, bytes); 2211 2212 fail: 2213 qemu_vfree(buf); 2214 2215 return ret; 2216 } 2217 2218 typedef struct Qcow2AioTask { 2219 AioTask task; 2220 2221 BlockDriverState *bs; 2222 QCow2SubclusterType subcluster_type; /* only for read */ 2223 uint64_t host_offset; /* or l2_entry for compressed read */ 2224 uint64_t offset; 2225 uint64_t bytes; 2226 QEMUIOVector *qiov; 2227 uint64_t qiov_offset; 2228 QCowL2Meta *l2meta; /* only for write */ 2229 } Qcow2AioTask; 2230 2231 static coroutine_fn int qcow2_co_preadv_task_entry(AioTask *task); 2232 static coroutine_fn int qcow2_add_task(BlockDriverState *bs, 2233 AioTaskPool *pool, 2234 AioTaskFunc func, 2235 QCow2SubclusterType subcluster_type, 2236 uint64_t host_offset, 2237 uint64_t offset, 2238 uint64_t bytes, 2239 QEMUIOVector *qiov, 2240 size_t qiov_offset, 2241 QCowL2Meta *l2meta) 2242 { 2243 Qcow2AioTask local_task; 2244 Qcow2AioTask *task = pool ? g_new(Qcow2AioTask, 1) : &local_task; 2245 2246 *task = (Qcow2AioTask) { 2247 .task.func = func, 2248 .bs = bs, 2249 .subcluster_type = subcluster_type, 2250 .qiov = qiov, 2251 .host_offset = host_offset, 2252 .offset = offset, 2253 .bytes = bytes, 2254 .qiov_offset = qiov_offset, 2255 .l2meta = l2meta, 2256 }; 2257 2258 trace_qcow2_add_task(qemu_coroutine_self(), bs, pool, 2259 func == qcow2_co_preadv_task_entry ? 
"read" : "write", 2260 subcluster_type, host_offset, offset, bytes, 2261 qiov, qiov_offset); 2262 2263 if (!pool) { 2264 return func(&task->task); 2265 } 2266 2267 aio_task_pool_start_task(pool, &task->task); 2268 2269 return 0; 2270 } 2271 2272 static int coroutine_fn GRAPH_RDLOCK 2273 qcow2_co_preadv_task(BlockDriverState *bs, QCow2SubclusterType subc_type, 2274 uint64_t host_offset, uint64_t offset, uint64_t bytes, 2275 QEMUIOVector *qiov, size_t qiov_offset) 2276 { 2277 BDRVQcow2State *s = bs->opaque; 2278 2279 switch (subc_type) { 2280 case QCOW2_SUBCLUSTER_ZERO_PLAIN: 2281 case QCOW2_SUBCLUSTER_ZERO_ALLOC: 2282 /* Both zero types are handled in qcow2_co_preadv_part */ 2283 g_assert_not_reached(); 2284 2285 case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN: 2286 case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC: 2287 assert(bs->backing); /* otherwise handled in qcow2_co_preadv_part */ 2288 2289 BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); 2290 return bdrv_co_preadv_part(bs->backing, offset, bytes, 2291 qiov, qiov_offset, 0); 2292 2293 case QCOW2_SUBCLUSTER_COMPRESSED: 2294 return qcow2_co_preadv_compressed(bs, host_offset, 2295 offset, bytes, qiov, qiov_offset); 2296 2297 case QCOW2_SUBCLUSTER_NORMAL: 2298 if (bs->encrypted) { 2299 return qcow2_co_preadv_encrypted(bs, host_offset, 2300 offset, bytes, qiov, qiov_offset); 2301 } 2302 2303 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); 2304 return bdrv_co_preadv_part(s->data_file, host_offset, 2305 bytes, qiov, qiov_offset, 0); 2306 2307 default: 2308 g_assert_not_reached(); 2309 } 2310 2311 g_assert_not_reached(); 2312 } 2313 2314 /* 2315 * This function can count as GRAPH_RDLOCK because qcow2_co_preadv_part() holds 2316 * the graph lock and keeps it until this coroutine has terminated. 2317 */ 2318 static int coroutine_fn GRAPH_RDLOCK qcow2_co_preadv_task_entry(AioTask *task) 2319 { 2320 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task); 2321 2322 assert(!t->l2meta); 2323 2324 return qcow2_co_preadv_task(t->bs, t->subcluster_type, 2325 t->host_offset, t->offset, t->bytes, 2326 t->qiov, t->qiov_offset); 2327 } 2328 2329 static int coroutine_fn GRAPH_RDLOCK 2330 qcow2_co_preadv_part(BlockDriverState *bs, int64_t offset, int64_t bytes, 2331 QEMUIOVector *qiov, size_t qiov_offset, 2332 BdrvRequestFlags flags) 2333 { 2334 BDRVQcow2State *s = bs->opaque; 2335 int ret = 0; 2336 unsigned int cur_bytes; /* number of bytes in current iteration */ 2337 uint64_t host_offset = 0; 2338 QCow2SubclusterType type; 2339 AioTaskPool *aio = NULL; 2340 2341 while (bytes != 0 && aio_task_pool_status(aio) == 0) { 2342 /* prepare next request */ 2343 cur_bytes = MIN(bytes, INT_MAX); 2344 if (s->crypto) { 2345 cur_bytes = MIN(cur_bytes, 2346 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 2347 } 2348 2349 qemu_co_mutex_lock(&s->lock); 2350 ret = qcow2_get_host_offset(bs, offset, &cur_bytes, 2351 &host_offset, &type); 2352 qemu_co_mutex_unlock(&s->lock); 2353 if (ret < 0) { 2354 goto out; 2355 } 2356 2357 if (type == QCOW2_SUBCLUSTER_ZERO_PLAIN || 2358 type == QCOW2_SUBCLUSTER_ZERO_ALLOC || 2359 (type == QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN && !bs->backing) || 2360 (type == QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC && !bs->backing)) 2361 { 2362 qemu_iovec_memset(qiov, qiov_offset, 0, cur_bytes); 2363 } else { 2364 if (!aio && cur_bytes != bytes) { 2365 aio = aio_task_pool_new(QCOW2_MAX_WORKERS); 2366 } 2367 ret = qcow2_add_task(bs, aio, qcow2_co_preadv_task_entry, type, 2368 host_offset, offset, cur_bytes, 2369 qiov, qiov_offset, NULL); 2370 if (ret < 0) { 2371 goto out; 2372 } 2373 } 
2374 2375 bytes -= cur_bytes; 2376 offset += cur_bytes; 2377 qiov_offset += cur_bytes; 2378 } 2379 2380 out: 2381 if (aio) { 2382 aio_task_pool_wait_all(aio); 2383 if (ret == 0) { 2384 ret = aio_task_pool_status(aio); 2385 } 2386 g_free(aio); 2387 } 2388 2389 return ret; 2390 } 2391 2392 /* Check if it's possible to merge a write request with the writing of 2393 * the data from the COW regions */ 2394 static bool merge_cow(uint64_t offset, unsigned bytes, 2395 QEMUIOVector *qiov, size_t qiov_offset, 2396 QCowL2Meta *l2meta) 2397 { 2398 QCowL2Meta *m; 2399 2400 for (m = l2meta; m != NULL; m = m->next) { 2401 /* If both COW regions are empty then there's nothing to merge */ 2402 if (m->cow_start.nb_bytes == 0 && m->cow_end.nb_bytes == 0) { 2403 continue; 2404 } 2405 2406 /* If COW regions are handled already, skip this too */ 2407 if (m->skip_cow) { 2408 continue; 2409 } 2410 2411 /* 2412 * The write request should start immediately after the first 2413 * COW region. This does not always happen because the area 2414 * touched by the request can be larger than the one defined 2415 * by @m (a single request can span an area consisting of a 2416 * mix of previously unallocated and allocated clusters, that 2417 * is why @l2meta is a list). 2418 */ 2419 if (l2meta_cow_start(m) + m->cow_start.nb_bytes != offset) { 2420 /* In this case the request starts before this region */ 2421 assert(offset < l2meta_cow_start(m)); 2422 assert(m->cow_start.nb_bytes == 0); 2423 continue; 2424 } 2425 2426 /* The write request should end immediately before the second 2427 * COW region (see above for why it does not always happen) */ 2428 if (m->offset + m->cow_end.offset != offset + bytes) { 2429 assert(offset + bytes > m->offset + m->cow_end.offset); 2430 assert(m->cow_end.nb_bytes == 0); 2431 continue; 2432 } 2433 2434 /* Make sure that adding both COW regions to the QEMUIOVector 2435 * does not exceed IOV_MAX */ 2436 if (qemu_iovec_subvec_niov(qiov, qiov_offset, bytes) > IOV_MAX - 2) { 2437 continue; 2438 } 2439 2440 m->data_qiov = qiov; 2441 m->data_qiov_offset = qiov_offset; 2442 return true; 2443 } 2444 2445 return false; 2446 } 2447 2448 /* 2449 * Return 1 if the COW regions read as zeroes, 0 if not, < 0 on error. 2450 * Note that returning 0 does not guarantee non-zero data. 2451 */ 2452 static int coroutine_fn GRAPH_RDLOCK 2453 is_zero_cow(BlockDriverState *bs, QCowL2Meta *m) 2454 { 2455 /* 2456 * This check is designed for optimization shortcut so it must be 2457 * efficient. 2458 * Instead of is_zero(), use bdrv_co_is_zero_fast() as it is 2459 * faster (but not as accurate and can result in false negatives). 
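     * A false negative here is harmless: handle_alloc_space() then simply
     * skips the zero-write optimization for that region and the normal COW
     * path copies the (zero) data instead, which is correct, just slower.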
2460 */ 2461 int ret = bdrv_co_is_zero_fast(bs, m->offset + m->cow_start.offset, 2462 m->cow_start.nb_bytes); 2463 if (ret <= 0) { 2464 return ret; 2465 } 2466 2467 return bdrv_co_is_zero_fast(bs, m->offset + m->cow_end.offset, 2468 m->cow_end.nb_bytes); 2469 } 2470 2471 static int coroutine_fn GRAPH_RDLOCK 2472 handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta) 2473 { 2474 BDRVQcow2State *s = bs->opaque; 2475 QCowL2Meta *m; 2476 2477 if (!(s->data_file->bs->supported_zero_flags & BDRV_REQ_NO_FALLBACK)) { 2478 return 0; 2479 } 2480 2481 if (bs->encrypted) { 2482 return 0; 2483 } 2484 2485 for (m = l2meta; m != NULL; m = m->next) { 2486 int ret; 2487 uint64_t start_offset = m->alloc_offset + m->cow_start.offset; 2488 unsigned nb_bytes = m->cow_end.offset + m->cow_end.nb_bytes - 2489 m->cow_start.offset; 2490 2491 if (!m->cow_start.nb_bytes && !m->cow_end.nb_bytes) { 2492 continue; 2493 } 2494 2495 ret = is_zero_cow(bs, m); 2496 if (ret < 0) { 2497 return ret; 2498 } else if (ret == 0) { 2499 continue; 2500 } 2501 2502 /* 2503 * instead of writing zero COW buffers, 2504 * efficiently zero out the whole clusters 2505 */ 2506 2507 ret = qcow2_pre_write_overlap_check(bs, 0, start_offset, nb_bytes, 2508 true); 2509 if (ret < 0) { 2510 return ret; 2511 } 2512 2513 BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_SPACE); 2514 ret = bdrv_co_pwrite_zeroes(s->data_file, start_offset, nb_bytes, 2515 BDRV_REQ_NO_FALLBACK); 2516 if (ret < 0) { 2517 if (ret != -ENOTSUP && ret != -EAGAIN) { 2518 return ret; 2519 } 2520 continue; 2521 } 2522 2523 trace_qcow2_skip_cow(qemu_coroutine_self(), m->offset, m->nb_clusters); 2524 m->skip_cow = true; 2525 } 2526 return 0; 2527 } 2528 2529 /* 2530 * qcow2_co_pwritev_task 2531 * Called with s->lock unlocked 2532 * l2meta - if not NULL, qcow2_co_pwritev_task() will consume it. Caller must 2533 * not use it somehow after qcow2_co_pwritev_task() call 2534 */ 2535 static coroutine_fn GRAPH_RDLOCK 2536 int qcow2_co_pwritev_task(BlockDriverState *bs, uint64_t host_offset, 2537 uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, 2538 uint64_t qiov_offset, QCowL2Meta *l2meta) 2539 { 2540 int ret; 2541 BDRVQcow2State *s = bs->opaque; 2542 void *crypt_buf = NULL; 2543 QEMUIOVector encrypted_qiov; 2544 2545 if (bs->encrypted) { 2546 assert(s->crypto); 2547 assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 2548 crypt_buf = qemu_try_blockalign(bs->file->bs, bytes); 2549 if (crypt_buf == NULL) { 2550 ret = -ENOMEM; 2551 goto out_unlocked; 2552 } 2553 qemu_iovec_to_buf(qiov, qiov_offset, crypt_buf, bytes); 2554 2555 if (qcow2_co_encrypt(bs, host_offset, offset, crypt_buf, bytes) < 0) { 2556 ret = -EIO; 2557 goto out_unlocked; 2558 } 2559 2560 qemu_iovec_init_buf(&encrypted_qiov, crypt_buf, bytes); 2561 qiov = &encrypted_qiov; 2562 qiov_offset = 0; 2563 } 2564 2565 /* Try to efficiently initialize the physical space with zeroes */ 2566 ret = handle_alloc_space(bs, l2meta); 2567 if (ret < 0) { 2568 goto out_unlocked; 2569 } 2570 2571 /* 2572 * If we need to do COW, check if it's possible to merge the 2573 * writing of the guest data together with that of the COW regions. 2574 * If it's not possible (or not necessary) then write the 2575 * guest data now. 
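     * (When the merge succeeds, merge_cow() attaches the guest qiov to the
     * QCowL2Meta so that the COW code can later issue a single write
     * covering the COW head, the guest data and the COW tail; that is why
     * merge_cow() checks against IOV_MAX - 2.)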
2576 */ 2577 if (!merge_cow(offset, bytes, qiov, qiov_offset, l2meta)) { 2578 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); 2579 trace_qcow2_writev_data(qemu_coroutine_self(), host_offset); 2580 ret = bdrv_co_pwritev_part(s->data_file, host_offset, 2581 bytes, qiov, qiov_offset, 0); 2582 if (ret < 0) { 2583 goto out_unlocked; 2584 } 2585 } 2586 2587 qemu_co_mutex_lock(&s->lock); 2588 2589 ret = qcow2_handle_l2meta(bs, &l2meta, true); 2590 goto out_locked; 2591 2592 out_unlocked: 2593 qemu_co_mutex_lock(&s->lock); 2594 2595 out_locked: 2596 qcow2_handle_l2meta(bs, &l2meta, false); 2597 qemu_co_mutex_unlock(&s->lock); 2598 2599 qemu_vfree(crypt_buf); 2600 2601 return ret; 2602 } 2603 2604 /* 2605 * This function can count as GRAPH_RDLOCK because qcow2_co_pwritev_part() holds 2606 * the graph lock and keeps it until this coroutine has terminated. 2607 */ 2608 static coroutine_fn GRAPH_RDLOCK int qcow2_co_pwritev_task_entry(AioTask *task) 2609 { 2610 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task); 2611 2612 assert(!t->subcluster_type); 2613 2614 return qcow2_co_pwritev_task(t->bs, t->host_offset, 2615 t->offset, t->bytes, t->qiov, t->qiov_offset, 2616 t->l2meta); 2617 } 2618 2619 static int coroutine_fn GRAPH_RDLOCK 2620 qcow2_co_pwritev_part(BlockDriverState *bs, int64_t offset, int64_t bytes, 2621 QEMUIOVector *qiov, size_t qiov_offset, 2622 BdrvRequestFlags flags) 2623 { 2624 BDRVQcow2State *s = bs->opaque; 2625 int offset_in_cluster; 2626 int ret; 2627 unsigned int cur_bytes; /* number of sectors in current iteration */ 2628 uint64_t host_offset; 2629 QCowL2Meta *l2meta = NULL; 2630 AioTaskPool *aio = NULL; 2631 2632 trace_qcow2_writev_start_req(qemu_coroutine_self(), offset, bytes); 2633 2634 while (bytes != 0 && aio_task_pool_status(aio) == 0) { 2635 2636 l2meta = NULL; 2637 2638 trace_qcow2_writev_start_part(qemu_coroutine_self()); 2639 offset_in_cluster = offset_into_cluster(s, offset); 2640 cur_bytes = MIN(bytes, INT_MAX); 2641 if (bs->encrypted) { 2642 cur_bytes = MIN(cur_bytes, 2643 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size 2644 - offset_in_cluster); 2645 } 2646 2647 qemu_co_mutex_lock(&s->lock); 2648 2649 ret = qcow2_alloc_host_offset(bs, offset, &cur_bytes, 2650 &host_offset, &l2meta); 2651 if (ret < 0) { 2652 goto out_locked; 2653 } 2654 2655 ret = qcow2_pre_write_overlap_check(bs, 0, host_offset, 2656 cur_bytes, true); 2657 if (ret < 0) { 2658 goto out_locked; 2659 } 2660 2661 qemu_co_mutex_unlock(&s->lock); 2662 2663 if (!aio && cur_bytes != bytes) { 2664 aio = aio_task_pool_new(QCOW2_MAX_WORKERS); 2665 } 2666 ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_task_entry, 0, 2667 host_offset, offset, 2668 cur_bytes, qiov, qiov_offset, l2meta); 2669 l2meta = NULL; /* l2meta is consumed by qcow2_co_pwritev_task() */ 2670 if (ret < 0) { 2671 goto fail_nometa; 2672 } 2673 2674 bytes -= cur_bytes; 2675 offset += cur_bytes; 2676 qiov_offset += cur_bytes; 2677 trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes); 2678 } 2679 ret = 0; 2680 2681 qemu_co_mutex_lock(&s->lock); 2682 2683 out_locked: 2684 qcow2_handle_l2meta(bs, &l2meta, false); 2685 2686 qemu_co_mutex_unlock(&s->lock); 2687 2688 fail_nometa: 2689 if (aio) { 2690 aio_task_pool_wait_all(aio); 2691 if (ret == 0) { 2692 ret = aio_task_pool_status(aio); 2693 } 2694 g_free(aio); 2695 } 2696 2697 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); 2698 2699 return ret; 2700 } 2701 2702 static int qcow2_inactivate(BlockDriverState *bs) 2703 { 2704 BDRVQcow2State *s = bs->opaque; 2705 int ret, result = 0; 2706 
Error *local_err = NULL; 2707 2708 qcow2_store_persistent_dirty_bitmaps(bs, true, &local_err); 2709 if (local_err != NULL) { 2710 result = -EINVAL; 2711 error_reportf_err(local_err, "Lost persistent bitmaps during " 2712 "inactivation of node '%s': ", 2713 bdrv_get_device_or_node_name(bs)); 2714 } 2715 2716 ret = qcow2_cache_flush(bs, s->l2_table_cache); 2717 if (ret) { 2718 result = ret; 2719 error_report("Failed to flush the L2 table cache: %s", 2720 strerror(-ret)); 2721 } 2722 2723 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 2724 if (ret) { 2725 result = ret; 2726 error_report("Failed to flush the refcount block cache: %s", 2727 strerror(-ret)); 2728 } 2729 2730 if (result == 0) { 2731 qcow2_mark_clean(bs); 2732 } 2733 2734 return result; 2735 } 2736 2737 static void qcow2_do_close(BlockDriverState *bs, bool close_data_file) 2738 { 2739 BDRVQcow2State *s = bs->opaque; 2740 qemu_vfree(s->l1_table); 2741 /* else pre-write overlap checks in cache_destroy may crash */ 2742 s->l1_table = NULL; 2743 2744 if (!(s->flags & BDRV_O_INACTIVE)) { 2745 qcow2_inactivate(bs); 2746 } 2747 2748 cache_clean_timer_del(bs); 2749 qcow2_cache_destroy(s->l2_table_cache); 2750 qcow2_cache_destroy(s->refcount_block_cache); 2751 2752 qcrypto_block_free(s->crypto); 2753 s->crypto = NULL; 2754 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 2755 2756 g_free(s->unknown_header_fields); 2757 cleanup_unknown_header_ext(bs); 2758 2759 g_free(s->image_data_file); 2760 g_free(s->image_backing_file); 2761 g_free(s->image_backing_format); 2762 2763 if (close_data_file && has_data_file(bs)) { 2764 bdrv_unref_child(bs, s->data_file); 2765 s->data_file = NULL; 2766 } 2767 2768 qcow2_refcount_close(bs); 2769 qcow2_free_snapshots(bs); 2770 } 2771 2772 static void qcow2_close(BlockDriverState *bs) 2773 { 2774 qcow2_do_close(bs, true); 2775 } 2776 2777 static void coroutine_fn GRAPH_RDLOCK 2778 qcow2_co_invalidate_cache(BlockDriverState *bs, Error **errp) 2779 { 2780 ERRP_GUARD(); 2781 BDRVQcow2State *s = bs->opaque; 2782 BdrvChild *data_file; 2783 int flags = s->flags; 2784 QCryptoBlock *crypto = NULL; 2785 QDict *options; 2786 int ret; 2787 2788 /* 2789 * Backing files are read-only which makes all of their metadata immutable, 2790 * that means we don't have to worry about reopening them here. 2791 */ 2792 2793 crypto = s->crypto; 2794 s->crypto = NULL; 2795 2796 /* 2797 * Do not reopen s->data_file (i.e., have qcow2_do_close() not close it, 2798 * and then prevent qcow2_do_open() from opening it), because this function 2799 * runs in the I/O path and as such we must not invoke global-state 2800 * functions like bdrv_unref_child() and bdrv_open_child(). 
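     * Instead, the existing child is kept across the close/open cycle below:
     * the pointer is saved before the BDRVQcow2State is zeroed and restored
     * afterwards, and qcow2_do_open() is called with open_data_file=false.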
2801 */ 2802 2803 qcow2_do_close(bs, false); 2804 2805 data_file = s->data_file; 2806 memset(s, 0, sizeof(BDRVQcow2State)); 2807 s->data_file = data_file; 2808 2809 options = qdict_clone_shallow(bs->options); 2810 2811 flags &= ~BDRV_O_INACTIVE; 2812 qemu_co_mutex_lock(&s->lock); 2813 ret = qcow2_do_open(bs, options, flags, false, errp); 2814 qemu_co_mutex_unlock(&s->lock); 2815 qobject_unref(options); 2816 if (ret < 0) { 2817 error_prepend(errp, "Could not reopen qcow2 layer: "); 2818 bs->drv = NULL; 2819 return; 2820 } 2821 2822 s->crypto = crypto; 2823 } 2824 2825 static size_t header_ext_add(char *buf, uint32_t magic, const void *s, 2826 size_t len, size_t buflen) 2827 { 2828 QCowExtension *ext_backing_fmt = (QCowExtension*) buf; 2829 size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7); 2830 2831 if (buflen < ext_len) { 2832 return -ENOSPC; 2833 } 2834 2835 *ext_backing_fmt = (QCowExtension) { 2836 .magic = cpu_to_be32(magic), 2837 .len = cpu_to_be32(len), 2838 }; 2839 2840 if (len) { 2841 memcpy(buf + sizeof(QCowExtension), s, len); 2842 } 2843 2844 return ext_len; 2845 } 2846 2847 /* 2848 * Updates the qcow2 header, including the variable length parts of it, i.e. 2849 * the backing file name and all extensions. qcow2 was not designed to allow 2850 * such changes, so if we run out of space (we can only use the first cluster) 2851 * this function may fail. 2852 * 2853 * Returns 0 on success, -errno in error cases. 2854 */ 2855 int qcow2_update_header(BlockDriverState *bs) 2856 { 2857 BDRVQcow2State *s = bs->opaque; 2858 QCowHeader *header; 2859 char *buf; 2860 size_t buflen = s->cluster_size; 2861 int ret; 2862 uint64_t total_size; 2863 uint32_t refcount_table_clusters; 2864 size_t header_length; 2865 Qcow2UnknownHeaderExtension *uext; 2866 2867 buf = qemu_blockalign(bs, buflen); 2868 2869 /* Header structure */ 2870 header = (QCowHeader*) buf; 2871 2872 if (buflen < sizeof(*header)) { 2873 ret = -ENOSPC; 2874 goto fail; 2875 } 2876 2877 header_length = sizeof(*header) + s->unknown_header_fields_size; 2878 total_size = bs->total_sectors * BDRV_SECTOR_SIZE; 2879 refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3); 2880 2881 ret = validate_compression_type(s, NULL); 2882 if (ret) { 2883 goto fail; 2884 } 2885 2886 *header = (QCowHeader) { 2887 /* Version 2 fields */ 2888 .magic = cpu_to_be32(QCOW_MAGIC), 2889 .version = cpu_to_be32(s->qcow_version), 2890 .backing_file_offset = 0, 2891 .backing_file_size = 0, 2892 .cluster_bits = cpu_to_be32(s->cluster_bits), 2893 .size = cpu_to_be64(total_size), 2894 .crypt_method = cpu_to_be32(s->crypt_method_header), 2895 .l1_size = cpu_to_be32(s->l1_size), 2896 .l1_table_offset = cpu_to_be64(s->l1_table_offset), 2897 .refcount_table_offset = cpu_to_be64(s->refcount_table_offset), 2898 .refcount_table_clusters = cpu_to_be32(refcount_table_clusters), 2899 .nb_snapshots = cpu_to_be32(s->nb_snapshots), 2900 .snapshots_offset = cpu_to_be64(s->snapshots_offset), 2901 2902 /* Version 3 fields */ 2903 .incompatible_features = cpu_to_be64(s->incompatible_features), 2904 .compatible_features = cpu_to_be64(s->compatible_features), 2905 .autoclear_features = cpu_to_be64(s->autoclear_features), 2906 .refcount_order = cpu_to_be32(s->refcount_order), 2907 .header_length = cpu_to_be32(header_length), 2908 .compression_type = s->compression_type, 2909 }; 2910 2911 /* For older versions, write a shorter header */ 2912 switch (s->qcow_version) { 2913 case 2: 2914 ret = offsetof(QCowHeader, incompatible_features); 2915 break; 2916 case 3: 2917 
ret = sizeof(*header); 2918 break; 2919 default: 2920 ret = -EINVAL; 2921 goto fail; 2922 } 2923 2924 buf += ret; 2925 buflen -= ret; 2926 memset(buf, 0, buflen); 2927 2928 /* Preserve any unknown field in the header */ 2929 if (s->unknown_header_fields_size) { 2930 if (buflen < s->unknown_header_fields_size) { 2931 ret = -ENOSPC; 2932 goto fail; 2933 } 2934 2935 memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size); 2936 buf += s->unknown_header_fields_size; 2937 buflen -= s->unknown_header_fields_size; 2938 } 2939 2940 /* Backing file format header extension */ 2941 if (s->image_backing_format) { 2942 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT, 2943 s->image_backing_format, 2944 strlen(s->image_backing_format), 2945 buflen); 2946 if (ret < 0) { 2947 goto fail; 2948 } 2949 2950 buf += ret; 2951 buflen -= ret; 2952 } 2953 2954 /* External data file header extension */ 2955 if (has_data_file(bs) && s->image_data_file) { 2956 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_DATA_FILE, 2957 s->image_data_file, strlen(s->image_data_file), 2958 buflen); 2959 if (ret < 0) { 2960 goto fail; 2961 } 2962 2963 buf += ret; 2964 buflen -= ret; 2965 } 2966 2967 /* Full disk encryption header pointer extension */ 2968 if (s->crypto_header.offset != 0) { 2969 s->crypto_header.offset = cpu_to_be64(s->crypto_header.offset); 2970 s->crypto_header.length = cpu_to_be64(s->crypto_header.length); 2971 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_CRYPTO_HEADER, 2972 &s->crypto_header, sizeof(s->crypto_header), 2973 buflen); 2974 s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset); 2975 s->crypto_header.length = be64_to_cpu(s->crypto_header.length); 2976 if (ret < 0) { 2977 goto fail; 2978 } 2979 buf += ret; 2980 buflen -= ret; 2981 } 2982 2983 /* 2984 * Feature table. A mere 8 feature names occupies 392 bytes, and 2985 * when coupled with the v3 minimum header of 104 bytes plus the 2986 * 8-byte end-of-extension marker, that would leave only 8 bytes 2987 * for a backing file name in an image with 512-byte clusters. 2988 * Thus, we choose to omit this header for cluster sizes 4k and 2989 * smaller. 
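     * (Each feature table entry is 48 bytes: one byte of feature type, one
     * byte of bit number and a 46-byte name.  Eight entries are 384 bytes,
     * plus the 8-byte extension header, giving the 392 bytes quoted above;
     * together with the 104-byte minimum header and the 8-byte end marker
     * that is 504 of the 512 bytes available with the smallest cluster
     * size, hence the cutoff below.)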
2990 */ 2991 if (s->qcow_version >= 3 && s->cluster_size > 4096) { 2992 static const Qcow2Feature features[] = { 2993 { 2994 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2995 .bit = QCOW2_INCOMPAT_DIRTY_BITNR, 2996 .name = "dirty bit", 2997 }, 2998 { 2999 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 3000 .bit = QCOW2_INCOMPAT_CORRUPT_BITNR, 3001 .name = "corrupt bit", 3002 }, 3003 { 3004 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 3005 .bit = QCOW2_INCOMPAT_DATA_FILE_BITNR, 3006 .name = "external data file", 3007 }, 3008 { 3009 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 3010 .bit = QCOW2_INCOMPAT_COMPRESSION_BITNR, 3011 .name = "compression type", 3012 }, 3013 { 3014 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 3015 .bit = QCOW2_INCOMPAT_EXTL2_BITNR, 3016 .name = "extended L2 entries", 3017 }, 3018 { 3019 .type = QCOW2_FEAT_TYPE_COMPATIBLE, 3020 .bit = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR, 3021 .name = "lazy refcounts", 3022 }, 3023 { 3024 .type = QCOW2_FEAT_TYPE_AUTOCLEAR, 3025 .bit = QCOW2_AUTOCLEAR_BITMAPS_BITNR, 3026 .name = "bitmaps", 3027 }, 3028 { 3029 .type = QCOW2_FEAT_TYPE_AUTOCLEAR, 3030 .bit = QCOW2_AUTOCLEAR_DATA_FILE_RAW_BITNR, 3031 .name = "raw external data", 3032 }, 3033 }; 3034 3035 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE, 3036 features, sizeof(features), buflen); 3037 if (ret < 0) { 3038 goto fail; 3039 } 3040 buf += ret; 3041 buflen -= ret; 3042 } 3043 3044 /* Bitmap extension */ 3045 if (s->nb_bitmaps > 0) { 3046 Qcow2BitmapHeaderExt bitmaps_header = { 3047 .nb_bitmaps = cpu_to_be32(s->nb_bitmaps), 3048 .bitmap_directory_size = 3049 cpu_to_be64(s->bitmap_directory_size), 3050 .bitmap_directory_offset = 3051 cpu_to_be64(s->bitmap_directory_offset) 3052 }; 3053 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BITMAPS, 3054 &bitmaps_header, sizeof(bitmaps_header), 3055 buflen); 3056 if (ret < 0) { 3057 goto fail; 3058 } 3059 buf += ret; 3060 buflen -= ret; 3061 } 3062 3063 /* Keep unknown header extensions */ 3064 QLIST_FOREACH(uext, &s->unknown_header_ext, next) { 3065 ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen); 3066 if (ret < 0) { 3067 goto fail; 3068 } 3069 3070 buf += ret; 3071 buflen -= ret; 3072 } 3073 3074 /* End of header extensions */ 3075 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen); 3076 if (ret < 0) { 3077 goto fail; 3078 } 3079 3080 buf += ret; 3081 buflen -= ret; 3082 3083 /* Backing file name */ 3084 if (s->image_backing_file) { 3085 size_t backing_file_len = strlen(s->image_backing_file); 3086 3087 if (buflen < backing_file_len) { 3088 ret = -ENOSPC; 3089 goto fail; 3090 } 3091 3092 /* Using strncpy is ok here, since buf is not NUL-terminated. 
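           The name length is stored explicitly in header->backing_file_size,
           so no terminating NUL is required in the header cluster.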
*/ 3093 strncpy(buf, s->image_backing_file, buflen); 3094 3095 header->backing_file_offset = cpu_to_be64(buf - ((char*) header)); 3096 header->backing_file_size = cpu_to_be32(backing_file_len); 3097 } 3098 3099 /* Write the new header */ 3100 ret = bdrv_pwrite(bs->file, 0, s->cluster_size, header, 0); 3101 if (ret < 0) { 3102 goto fail; 3103 } 3104 3105 ret = 0; 3106 fail: 3107 qemu_vfree(header); 3108 return ret; 3109 } 3110 3111 static int qcow2_change_backing_file(BlockDriverState *bs, 3112 const char *backing_file, const char *backing_fmt) 3113 { 3114 BDRVQcow2State *s = bs->opaque; 3115 3116 /* Adding a backing file means that the external data file alone won't be 3117 * enough to make sense of the content */ 3118 if (backing_file && data_file_is_raw(bs)) { 3119 return -EINVAL; 3120 } 3121 3122 if (backing_file && strlen(backing_file) > 1023) { 3123 return -EINVAL; 3124 } 3125 3126 pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file), 3127 backing_file ?: ""); 3128 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: ""); 3129 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: ""); 3130 3131 g_free(s->image_backing_file); 3132 g_free(s->image_backing_format); 3133 3134 s->image_backing_file = backing_file ? g_strdup(bs->backing_file) : NULL; 3135 s->image_backing_format = backing_fmt ? g_strdup(bs->backing_format) : NULL; 3136 3137 return qcow2_update_header(bs); 3138 } 3139 3140 static int qcow2_set_up_encryption(BlockDriverState *bs, 3141 QCryptoBlockCreateOptions *cryptoopts, 3142 Error **errp) 3143 { 3144 BDRVQcow2State *s = bs->opaque; 3145 QCryptoBlock *crypto = NULL; 3146 int fmt, ret; 3147 3148 switch (cryptoopts->format) { 3149 case Q_CRYPTO_BLOCK_FORMAT_LUKS: 3150 fmt = QCOW_CRYPT_LUKS; 3151 break; 3152 case Q_CRYPTO_BLOCK_FORMAT_QCOW: 3153 fmt = QCOW_CRYPT_AES; 3154 break; 3155 default: 3156 error_setg(errp, "Crypto format not supported in qcow2"); 3157 return -EINVAL; 3158 } 3159 3160 s->crypt_method_header = fmt; 3161 3162 crypto = qcrypto_block_create(cryptoopts, "encrypt.", 3163 qcow2_crypto_hdr_init_func, 3164 qcow2_crypto_hdr_write_func, 3165 bs, errp); 3166 if (!crypto) { 3167 return -EINVAL; 3168 } 3169 3170 ret = qcow2_update_header(bs); 3171 if (ret < 0) { 3172 error_setg_errno(errp, -ret, "Could not write encryption header"); 3173 goto out; 3174 } 3175 3176 ret = 0; 3177 out: 3178 qcrypto_block_free(crypto); 3179 return ret; 3180 } 3181 3182 /** 3183 * Preallocates metadata structures for data clusters between @offset (in the 3184 * guest disk) and @new_length (which is thus generally the new guest disk 3185 * size). 3186 * 3187 * Returns: 0 on success, -errno on failure. 
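 *
 * Only qcow2 metadata and host cluster mappings are created here (note the
 * TODO below about data preallocation); the data file is then grown with
 * bdrv_co_truncate() so that reads of the newly mapped clusters do not fail
 * beyond EOF.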
3188 */ 3189 static int coroutine_fn GRAPH_RDLOCK 3190 preallocate_co(BlockDriverState *bs, uint64_t offset, uint64_t new_length, 3191 PreallocMode mode, Error **errp) 3192 { 3193 BDRVQcow2State *s = bs->opaque; 3194 uint64_t bytes; 3195 uint64_t host_offset = 0; 3196 int64_t file_length; 3197 unsigned int cur_bytes; 3198 int ret; 3199 QCowL2Meta *meta = NULL, *m; 3200 3201 assert(offset <= new_length); 3202 bytes = new_length - offset; 3203 3204 while (bytes) { 3205 cur_bytes = MIN(bytes, QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size)); 3206 ret = qcow2_alloc_host_offset(bs, offset, &cur_bytes, 3207 &host_offset, &meta); 3208 if (ret < 0) { 3209 error_setg_errno(errp, -ret, "Allocating clusters failed"); 3210 goto out; 3211 } 3212 3213 for (m = meta; m != NULL; m = m->next) { 3214 m->prealloc = true; 3215 } 3216 3217 ret = qcow2_handle_l2meta(bs, &meta, true); 3218 if (ret < 0) { 3219 error_setg_errno(errp, -ret, "Mapping clusters failed"); 3220 goto out; 3221 } 3222 3223 /* TODO Preallocate data if requested */ 3224 3225 bytes -= cur_bytes; 3226 offset += cur_bytes; 3227 } 3228 3229 /* 3230 * It is expected that the image file is large enough to actually contain 3231 * all of the allocated clusters (otherwise we get failing reads after 3232 * EOF). Extend the image to the last allocated sector. 3233 */ 3234 file_length = bdrv_co_getlength(s->data_file->bs); 3235 if (file_length < 0) { 3236 error_setg_errno(errp, -file_length, "Could not get file size"); 3237 ret = file_length; 3238 goto out; 3239 } 3240 3241 if (host_offset + cur_bytes > file_length) { 3242 if (mode == PREALLOC_MODE_METADATA) { 3243 mode = PREALLOC_MODE_OFF; 3244 } 3245 ret = bdrv_co_truncate(s->data_file, host_offset + cur_bytes, false, 3246 mode, 0, errp); 3247 if (ret < 0) { 3248 goto out; 3249 } 3250 } 3251 3252 ret = 0; 3253 3254 out: 3255 qcow2_handle_l2meta(bs, &meta, false); 3256 return ret; 3257 } 3258 3259 /* qcow2_refcount_metadata_size: 3260 * @clusters: number of clusters to refcount (including data and L1/L2 tables) 3261 * @cluster_size: size of a cluster, in bytes 3262 * @refcount_order: refcount bits power-of-2 exponent 3263 * @generous_increase: allow for the refcount table to be 1.5x as large as it 3264 * needs to be 3265 * 3266 * Returns: Number of bytes required for refcount blocks and table metadata. 3267 */ 3268 int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size, 3269 int refcount_order, bool generous_increase, 3270 uint64_t *refblock_count) 3271 { 3272 /* 3273 * Every host cluster is reference-counted, including metadata (even 3274 * refcount metadata is recursively included). 3275 * 3276 * An accurate formula for the size of refcount metadata size is difficult 3277 * to derive. An easier method of calculation is finding the fixed point 3278 * where no further refcount blocks or table clusters are required to 3279 * reference count every cluster. 
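     *
     * For illustration (values chosen only as an example): with the default
     * 64 KiB clusters and 16-bit refcounts, one refcount block covers
     * 65536 * 8 / 16 = 32768 clusters and one refcount table cluster holds
     * 65536 / 8 = 8192 block pointers.  For 1,000,000 data clusters the
     * loop below settles after two rounds at 31 refcount blocks plus 1
     * table cluster, i.e. 32 clusters (2 MiB) of refcount metadata.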
3280 */ 3281 int64_t blocks_per_table_cluster = cluster_size / REFTABLE_ENTRY_SIZE; 3282 int64_t refcounts_per_block = cluster_size * 8 / (1 << refcount_order); 3283 int64_t table = 0; /* number of refcount table clusters */ 3284 int64_t blocks = 0; /* number of refcount block clusters */ 3285 int64_t last; 3286 int64_t n = 0; 3287 3288 do { 3289 last = n; 3290 blocks = DIV_ROUND_UP(clusters + table + blocks, refcounts_per_block); 3291 table = DIV_ROUND_UP(blocks, blocks_per_table_cluster); 3292 n = clusters + blocks + table; 3293 3294 if (n == last && generous_increase) { 3295 clusters += DIV_ROUND_UP(table, 2); 3296 n = 0; /* force another loop */ 3297 generous_increase = false; 3298 } 3299 } while (n != last); 3300 3301 if (refblock_count) { 3302 *refblock_count = blocks; 3303 } 3304 3305 return (blocks + table) * cluster_size; 3306 } 3307 3308 /** 3309 * qcow2_calc_prealloc_size: 3310 * @total_size: virtual disk size in bytes 3311 * @cluster_size: cluster size in bytes 3312 * @refcount_order: refcount bits power-of-2 exponent 3313 * @extended_l2: true if the image has extended L2 entries 3314 * 3315 * Returns: Total number of bytes required for the fully allocated image 3316 * (including metadata). 3317 */ 3318 static int64_t qcow2_calc_prealloc_size(int64_t total_size, 3319 size_t cluster_size, 3320 int refcount_order, 3321 bool extended_l2) 3322 { 3323 int64_t meta_size = 0; 3324 uint64_t nl1e, nl2e; 3325 int64_t aligned_total_size = ROUND_UP(total_size, cluster_size); 3326 size_t l2e_size = extended_l2 ? L2E_SIZE_EXTENDED : L2E_SIZE_NORMAL; 3327 3328 /* header: 1 cluster */ 3329 meta_size += cluster_size; 3330 3331 /* total size of L2 tables */ 3332 nl2e = aligned_total_size / cluster_size; 3333 nl2e = ROUND_UP(nl2e, cluster_size / l2e_size); 3334 meta_size += nl2e * l2e_size; 3335 3336 /* total size of L1 tables */ 3337 nl1e = nl2e * l2e_size / cluster_size; 3338 nl1e = ROUND_UP(nl1e, cluster_size / L1E_SIZE); 3339 meta_size += nl1e * L1E_SIZE; 3340 3341 /* total size of refcount table and blocks */ 3342 meta_size += qcow2_refcount_metadata_size( 3343 (meta_size + aligned_total_size) / cluster_size, 3344 cluster_size, refcount_order, false, NULL); 3345 3346 return meta_size + aligned_total_size; 3347 } 3348 3349 static bool validate_cluster_size(size_t cluster_size, bool extended_l2, 3350 Error **errp) 3351 { 3352 int cluster_bits = ctz32(cluster_size); 3353 if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS || 3354 (1 << cluster_bits) != cluster_size) 3355 { 3356 error_setg(errp, "Cluster size must be a power of two between %d and " 3357 "%dk", 1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10)); 3358 return false; 3359 } 3360 3361 if (extended_l2) { 3362 unsigned min_cluster_size = 3363 (1 << MIN_CLUSTER_BITS) * QCOW_EXTL2_SUBCLUSTERS_PER_CLUSTER; 3364 if (cluster_size < min_cluster_size) { 3365 error_setg(errp, "Extended L2 entries are only supported with " 3366 "cluster sizes of at least %u bytes", min_cluster_size); 3367 return false; 3368 } 3369 } 3370 3371 return true; 3372 } 3373 3374 static size_t qcow2_opt_get_cluster_size_del(QemuOpts *opts, bool extended_l2, 3375 Error **errp) 3376 { 3377 size_t cluster_size; 3378 3379 cluster_size = qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE, 3380 DEFAULT_CLUSTER_SIZE); 3381 if (!validate_cluster_size(cluster_size, extended_l2, errp)) { 3382 return 0; 3383 } 3384 return cluster_size; 3385 } 3386 3387 static int qcow2_opt_get_version_del(QemuOpts *opts, Error **errp) 3388 { 3389 char *buf; 3390 int ret; 3391 
3392 buf = qemu_opt_get_del(opts, BLOCK_OPT_COMPAT_LEVEL); 3393 if (!buf) { 3394 ret = 3; /* default */ 3395 } else if (!strcmp(buf, "0.10")) { 3396 ret = 2; 3397 } else if (!strcmp(buf, "1.1")) { 3398 ret = 3; 3399 } else { 3400 error_setg(errp, "Invalid compatibility level: '%s'", buf); 3401 ret = -EINVAL; 3402 } 3403 g_free(buf); 3404 return ret; 3405 } 3406 3407 static uint64_t qcow2_opt_get_refcount_bits_del(QemuOpts *opts, int version, 3408 Error **errp) 3409 { 3410 uint64_t refcount_bits; 3411 3412 refcount_bits = qemu_opt_get_number_del(opts, BLOCK_OPT_REFCOUNT_BITS, 16); 3413 if (refcount_bits > 64 || !is_power_of_2(refcount_bits)) { 3414 error_setg(errp, "Refcount width must be a power of two and may not " 3415 "exceed 64 bits"); 3416 return 0; 3417 } 3418 3419 if (version < 3 && refcount_bits != 16) { 3420 error_setg(errp, "Different refcount widths than 16 bits require " 3421 "compatibility level 1.1 or above (use compat=1.1 or " 3422 "greater)"); 3423 return 0; 3424 } 3425 3426 return refcount_bits; 3427 } 3428 3429 static int coroutine_fn 3430 qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) 3431 { 3432 BlockdevCreateOptionsQcow2 *qcow2_opts; 3433 QDict *options; 3434 3435 /* 3436 * Open the image file and write a minimal qcow2 header. 3437 * 3438 * We keep things simple and start with a zero-sized image. We also 3439 * do without refcount blocks or a L1 table for now. We'll fix the 3440 * inconsistency later. 3441 * 3442 * We do need a refcount table because growing the refcount table means 3443 * allocating two new refcount blocks - the second of which would be at 3444 * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file 3445 * size for any qcow2 image. 3446 */ 3447 BlockBackend *blk = NULL; 3448 BlockDriverState *bs = NULL; 3449 BlockDriverState *data_bs = NULL; 3450 QCowHeader *header; 3451 size_t cluster_size; 3452 int version; 3453 int refcount_order; 3454 uint64_t *refcount_table; 3455 int ret; 3456 uint8_t compression_type = QCOW2_COMPRESSION_TYPE_ZLIB; 3457 3458 assert(create_options->driver == BLOCKDEV_DRIVER_QCOW2); 3459 qcow2_opts = &create_options->u.qcow2; 3460 3461 bs = bdrv_co_open_blockdev_ref(qcow2_opts->file, errp); 3462 if (bs == NULL) { 3463 return -EIO; 3464 } 3465 3466 /* Validate options and set default values */ 3467 if (!QEMU_IS_ALIGNED(qcow2_opts->size, BDRV_SECTOR_SIZE)) { 3468 error_setg(errp, "Image size must be a multiple of %u bytes", 3469 (unsigned) BDRV_SECTOR_SIZE); 3470 ret = -EINVAL; 3471 goto out; 3472 } 3473 3474 if (qcow2_opts->has_version) { 3475 switch (qcow2_opts->version) { 3476 case BLOCKDEV_QCOW2_VERSION_V2: 3477 version = 2; 3478 break; 3479 case BLOCKDEV_QCOW2_VERSION_V3: 3480 version = 3; 3481 break; 3482 default: 3483 g_assert_not_reached(); 3484 } 3485 } else { 3486 version = 3; 3487 } 3488 3489 if (qcow2_opts->has_cluster_size) { 3490 cluster_size = qcow2_opts->cluster_size; 3491 } else { 3492 cluster_size = DEFAULT_CLUSTER_SIZE; 3493 } 3494 3495 if (!qcow2_opts->has_extended_l2) { 3496 qcow2_opts->extended_l2 = false; 3497 } 3498 if (qcow2_opts->extended_l2) { 3499 if (version < 3) { 3500 error_setg(errp, "Extended L2 entries are only supported with " 3501 "compatibility level 1.1 and above (use version=v3 or " 3502 "greater)"); 3503 ret = -EINVAL; 3504 goto out; 3505 } 3506 } 3507 3508 if (!validate_cluster_size(cluster_size, qcow2_opts->extended_l2, errp)) { 3509 ret = -EINVAL; 3510 goto out; 3511 } 3512 3513 if (!qcow2_opts->has_preallocation) { 3514 qcow2_opts->preallocation = 
PREALLOC_MODE_OFF; 3515 } 3516 if (qcow2_opts->backing_file && 3517 qcow2_opts->preallocation != PREALLOC_MODE_OFF && 3518 !qcow2_opts->extended_l2) 3519 { 3520 error_setg(errp, "Backing file and preallocation can only be used at " 3521 "the same time if extended_l2 is on"); 3522 ret = -EINVAL; 3523 goto out; 3524 } 3525 if (qcow2_opts->has_backing_fmt && !qcow2_opts->backing_file) { 3526 error_setg(errp, "Backing format cannot be used without backing file"); 3527 ret = -EINVAL; 3528 goto out; 3529 } 3530 3531 if (!qcow2_opts->has_lazy_refcounts) { 3532 qcow2_opts->lazy_refcounts = false; 3533 } 3534 if (version < 3 && qcow2_opts->lazy_refcounts) { 3535 error_setg(errp, "Lazy refcounts only supported with compatibility " 3536 "level 1.1 and above (use version=v3 or greater)"); 3537 ret = -EINVAL; 3538 goto out; 3539 } 3540 3541 if (!qcow2_opts->has_refcount_bits) { 3542 qcow2_opts->refcount_bits = 16; 3543 } 3544 if (qcow2_opts->refcount_bits > 64 || 3545 !is_power_of_2(qcow2_opts->refcount_bits)) 3546 { 3547 error_setg(errp, "Refcount width must be a power of two and may not " 3548 "exceed 64 bits"); 3549 ret = -EINVAL; 3550 goto out; 3551 } 3552 if (version < 3 && qcow2_opts->refcount_bits != 16) { 3553 error_setg(errp, "Different refcount widths than 16 bits require " 3554 "compatibility level 1.1 or above (use version=v3 or " 3555 "greater)"); 3556 ret = -EINVAL; 3557 goto out; 3558 } 3559 refcount_order = ctz32(qcow2_opts->refcount_bits); 3560 3561 if (qcow2_opts->data_file_raw && !qcow2_opts->data_file) { 3562 error_setg(errp, "data-file-raw requires data-file"); 3563 ret = -EINVAL; 3564 goto out; 3565 } 3566 if (qcow2_opts->data_file_raw && qcow2_opts->backing_file) { 3567 error_setg(errp, "Backing file and data-file-raw cannot be used at " 3568 "the same time"); 3569 ret = -EINVAL; 3570 goto out; 3571 } 3572 if (qcow2_opts->data_file_raw && 3573 qcow2_opts->preallocation == PREALLOC_MODE_OFF) 3574 { 3575 /* 3576 * data-file-raw means that "the external data file can be 3577 * read as a consistent standalone raw image without looking 3578 * at the qcow2 metadata." It does not say that the metadata 3579 * must be ignored, though (and the qcow2 driver in fact does 3580 * not ignore it), so the L1/L2 tables must be present and 3581 * give a 1:1 mapping, so you get the same result regardless 3582 * of whether you look at the metadata or whether you ignore 3583 * it. 3584 */ 3585 qcow2_opts->preallocation = PREALLOC_MODE_METADATA; 3586 3587 /* 3588 * Cannot use preallocation with backing files, but giving a 3589 * backing file when specifying data_file_raw is an error 3590 * anyway. 
3591 */ 3592 assert(!qcow2_opts->backing_file); 3593 } 3594 3595 if (qcow2_opts->data_file) { 3596 if (version < 3) { 3597 error_setg(errp, "External data files are only supported with " 3598 "compatibility level 1.1 and above (use version=v3 or " 3599 "greater)"); 3600 ret = -EINVAL; 3601 goto out; 3602 } 3603 data_bs = bdrv_co_open_blockdev_ref(qcow2_opts->data_file, errp); 3604 if (data_bs == NULL) { 3605 ret = -EIO; 3606 goto out; 3607 } 3608 } 3609 3610 if (qcow2_opts->has_compression_type && 3611 qcow2_opts->compression_type != QCOW2_COMPRESSION_TYPE_ZLIB) { 3612 3613 ret = -EINVAL; 3614 3615 if (version < 3) { 3616 error_setg(errp, "Non-zlib compression type is only supported with " 3617 "compatibility level 1.1 and above (use version=v3 or " 3618 "greater)"); 3619 goto out; 3620 } 3621 3622 switch (qcow2_opts->compression_type) { 3623 #ifdef CONFIG_ZSTD 3624 case QCOW2_COMPRESSION_TYPE_ZSTD: 3625 break; 3626 #endif 3627 default: 3628 error_setg(errp, "Unknown compression type"); 3629 goto out; 3630 } 3631 3632 compression_type = qcow2_opts->compression_type; 3633 } 3634 3635 /* Create BlockBackend to write to the image */ 3636 blk = blk_co_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL, 3637 errp); 3638 if (!blk) { 3639 ret = -EPERM; 3640 goto out; 3641 } 3642 blk_set_allow_write_beyond_eof(blk, true); 3643 3644 /* Write the header */ 3645 QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS) < sizeof(*header)); 3646 header = g_malloc0(cluster_size); 3647 *header = (QCowHeader) { 3648 .magic = cpu_to_be32(QCOW_MAGIC), 3649 .version = cpu_to_be32(version), 3650 .cluster_bits = cpu_to_be32(ctz32(cluster_size)), 3651 .size = cpu_to_be64(0), 3652 .l1_table_offset = cpu_to_be64(0), 3653 .l1_size = cpu_to_be32(0), 3654 .refcount_table_offset = cpu_to_be64(cluster_size), 3655 .refcount_table_clusters = cpu_to_be32(1), 3656 .refcount_order = cpu_to_be32(refcount_order), 3657 /* don't deal with endianness since compression_type is 1 byte long */ 3658 .compression_type = compression_type, 3659 .header_length = cpu_to_be32(sizeof(*header)), 3660 }; 3661 3662 /* We'll update this to correct value later */ 3663 header->crypt_method = cpu_to_be32(QCOW_CRYPT_NONE); 3664 3665 if (qcow2_opts->lazy_refcounts) { 3666 header->compatible_features |= 3667 cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS); 3668 } 3669 if (data_bs) { 3670 header->incompatible_features |= 3671 cpu_to_be64(QCOW2_INCOMPAT_DATA_FILE); 3672 } 3673 if (qcow2_opts->data_file_raw) { 3674 header->autoclear_features |= 3675 cpu_to_be64(QCOW2_AUTOCLEAR_DATA_FILE_RAW); 3676 } 3677 if (compression_type != QCOW2_COMPRESSION_TYPE_ZLIB) { 3678 header->incompatible_features |= 3679 cpu_to_be64(QCOW2_INCOMPAT_COMPRESSION); 3680 } 3681 3682 if (qcow2_opts->extended_l2) { 3683 header->incompatible_features |= 3684 cpu_to_be64(QCOW2_INCOMPAT_EXTL2); 3685 } 3686 3687 ret = blk_co_pwrite(blk, 0, cluster_size, header, 0); 3688 g_free(header); 3689 if (ret < 0) { 3690 error_setg_errno(errp, -ret, "Could not write qcow2 header"); 3691 goto out; 3692 } 3693 3694 /* Write a refcount table with one refcount block */ 3695 refcount_table = g_malloc0(2 * cluster_size); 3696 refcount_table[0] = cpu_to_be64(2 * cluster_size); 3697 ret = blk_co_pwrite(blk, cluster_size, 2 * cluster_size, refcount_table, 0); 3698 g_free(refcount_table); 3699 3700 if (ret < 0) { 3701 error_setg_errno(errp, -ret, "Could not write refcount table"); 3702 goto out; 3703 } 3704 3705 blk_co_unref(blk); 3706 blk = NULL; 3707 3708 /* 3709 * And now open the image and make it consistent 
first (i.e. increase the 3710 * refcount of the cluster that is occupied by the header and the refcount 3711 * table) 3712 */ 3713 options = qdict_new(); 3714 qdict_put_str(options, "driver", "qcow2"); 3715 qdict_put_str(options, "file", bs->node_name); 3716 if (data_bs) { 3717 qdict_put_str(options, "data-file", data_bs->node_name); 3718 } 3719 blk = blk_co_new_open(NULL, NULL, options, 3720 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_NO_FLUSH, 3721 errp); 3722 if (blk == NULL) { 3723 ret = -EIO; 3724 goto out; 3725 } 3726 3727 ret = qcow2_alloc_clusters(blk_bs(blk), 3 * cluster_size); 3728 if (ret < 0) { 3729 error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 " 3730 "header and refcount table"); 3731 goto out; 3732 3733 } else if (ret != 0) { 3734 error_report("Huh, first cluster in empty image is already in use?"); 3735 abort(); 3736 } 3737 3738 /* Set the external data file if necessary */ 3739 if (data_bs) { 3740 BDRVQcow2State *s = blk_bs(blk)->opaque; 3741 s->image_data_file = g_strdup(data_bs->filename); 3742 } 3743 3744 /* Create a full header (including things like feature table) */ 3745 ret = qcow2_update_header(blk_bs(blk)); 3746 if (ret < 0) { 3747 error_setg_errno(errp, -ret, "Could not update qcow2 header"); 3748 goto out; 3749 } 3750 3751 /* Okay, now that we have a valid image, let's give it the right size */ 3752 ret = blk_co_truncate(blk, qcow2_opts->size, false, 3753 qcow2_opts->preallocation, 0, errp); 3754 if (ret < 0) { 3755 error_prepend(errp, "Could not resize image: "); 3756 goto out; 3757 } 3758 3759 /* Want a backing file? There you go. */ 3760 if (qcow2_opts->backing_file) { 3761 const char *backing_format = NULL; 3762 3763 if (qcow2_opts->has_backing_fmt) { 3764 backing_format = BlockdevDriver_str(qcow2_opts->backing_fmt); 3765 } 3766 3767 ret = bdrv_change_backing_file(blk_bs(blk), qcow2_opts->backing_file, 3768 backing_format, false); 3769 if (ret < 0) { 3770 error_setg_errno(errp, -ret, "Could not assign backing file '%s' " 3771 "with format '%s'", qcow2_opts->backing_file, 3772 backing_format); 3773 goto out; 3774 } 3775 } 3776 3777 /* Want encryption? There you go. */ 3778 if (qcow2_opts->encrypt) { 3779 ret = qcow2_set_up_encryption(blk_bs(blk), qcow2_opts->encrypt, errp); 3780 if (ret < 0) { 3781 goto out; 3782 } 3783 } 3784 3785 blk_co_unref(blk); 3786 blk = NULL; 3787 3788 /* Reopen the image without BDRV_O_NO_FLUSH to flush it before returning. 3789 * Using BDRV_O_NO_IO, since encryption is now setup we don't want to 3790 * have to setup decryption context. We're not doing any I/O on the top 3791 * level BlockDriverState, only lower layers, where BDRV_O_NO_IO does 3792 * not have effect. 
3793 */ 3794 options = qdict_new(); 3795 qdict_put_str(options, "driver", "qcow2"); 3796 qdict_put_str(options, "file", bs->node_name); 3797 if (data_bs) { 3798 qdict_put_str(options, "data-file", data_bs->node_name); 3799 } 3800 blk = blk_co_new_open(NULL, NULL, options, 3801 BDRV_O_RDWR | BDRV_O_NO_BACKING | BDRV_O_NO_IO, 3802 errp); 3803 if (blk == NULL) { 3804 ret = -EIO; 3805 goto out; 3806 } 3807 3808 ret = 0; 3809 out: 3810 blk_co_unref(blk); 3811 bdrv_co_unref(bs); 3812 bdrv_co_unref(data_bs); 3813 return ret; 3814 } 3815 3816 static int coroutine_fn GRAPH_RDLOCK 3817 qcow2_co_create_opts(BlockDriver *drv, const char *filename, QemuOpts *opts, 3818 Error **errp) 3819 { 3820 BlockdevCreateOptions *create_options = NULL; 3821 QDict *qdict; 3822 Visitor *v; 3823 BlockDriverState *bs = NULL; 3824 BlockDriverState *data_bs = NULL; 3825 const char *val; 3826 int ret; 3827 3828 /* Only the keyval visitor supports the dotted syntax needed for 3829 * encryption, so go through a QDict before getting a QAPI type. Ignore 3830 * options meant for the protocol layer so that the visitor doesn't 3831 * complain. */ 3832 qdict = qemu_opts_to_qdict_filtered(opts, NULL, bdrv_qcow2.create_opts, 3833 true); 3834 3835 /* Handle encryption options */ 3836 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT); 3837 if (val && !strcmp(val, "on")) { 3838 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT, "qcow"); 3839 } else if (val && !strcmp(val, "off")) { 3840 qdict_del(qdict, BLOCK_OPT_ENCRYPT); 3841 } 3842 3843 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT); 3844 if (val && !strcmp(val, "aes")) { 3845 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT, "qcow"); 3846 } 3847 3848 /* Convert compat=0.10/1.1 into compat=v2/v3, to be renamed into 3849 * version=v2/v3 below. */ 3850 val = qdict_get_try_str(qdict, BLOCK_OPT_COMPAT_LEVEL); 3851 if (val && !strcmp(val, "0.10")) { 3852 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v2"); 3853 } else if (val && !strcmp(val, "1.1")) { 3854 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v3"); 3855 } 3856 3857 /* Change legacy command line options into QMP ones */ 3858 static const QDictRenames opt_renames[] = { 3859 { BLOCK_OPT_BACKING_FILE, "backing-file" }, 3860 { BLOCK_OPT_BACKING_FMT, "backing-fmt" }, 3861 { BLOCK_OPT_CLUSTER_SIZE, "cluster-size" }, 3862 { BLOCK_OPT_LAZY_REFCOUNTS, "lazy-refcounts" }, 3863 { BLOCK_OPT_EXTL2, "extended-l2" }, 3864 { BLOCK_OPT_REFCOUNT_BITS, "refcount-bits" }, 3865 { BLOCK_OPT_ENCRYPT, BLOCK_OPT_ENCRYPT_FORMAT }, 3866 { BLOCK_OPT_COMPAT_LEVEL, "version" }, 3867 { BLOCK_OPT_DATA_FILE_RAW, "data-file-raw" }, 3868 { BLOCK_OPT_COMPRESSION_TYPE, "compression-type" }, 3869 { NULL, NULL }, 3870 }; 3871 3872 if (!qdict_rename_keys(qdict, opt_renames, errp)) { 3873 ret = -EINVAL; 3874 goto finish; 3875 } 3876 3877 /* Create and open the file (protocol layer) */ 3878 ret = bdrv_co_create_file(filename, opts, errp); 3879 if (ret < 0) { 3880 goto finish; 3881 } 3882 3883 bs = bdrv_co_open(filename, NULL, NULL, 3884 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp); 3885 if (bs == NULL) { 3886 ret = -EIO; 3887 goto finish; 3888 } 3889 3890 /* Create and open an external data file (protocol layer) */ 3891 val = qdict_get_try_str(qdict, BLOCK_OPT_DATA_FILE); 3892 if (val) { 3893 ret = bdrv_co_create_file(val, opts, errp); 3894 if (ret < 0) { 3895 goto finish; 3896 } 3897 3898 data_bs = bdrv_co_open(val, NULL, NULL, 3899 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, 3900 errp); 3901 if (data_bs == NULL) { 3902 ret = -EIO; 3903 goto finish; 3904 } 3905 
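/* The external data file is already open as a node at this point, so drop
 * the filename-based creation option and refer to the node by name instead;
 * qcow2_co_create() will then reuse that node rather than opening the file
 * a second time. */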
3906 qdict_del(qdict, BLOCK_OPT_DATA_FILE); 3907 qdict_put_str(qdict, "data-file", data_bs->node_name); 3908 } 3909 3910 /* Set 'driver' and 'node' options */ 3911 qdict_put_str(qdict, "driver", "qcow2"); 3912 qdict_put_str(qdict, "file", bs->node_name); 3913 3914 /* Now get the QAPI type BlockdevCreateOptions */ 3915 v = qobject_input_visitor_new_flat_confused(qdict, errp); 3916 if (!v) { 3917 ret = -EINVAL; 3918 goto finish; 3919 } 3920 3921 visit_type_BlockdevCreateOptions(v, NULL, &create_options, errp); 3922 visit_free(v); 3923 if (!create_options) { 3924 ret = -EINVAL; 3925 goto finish; 3926 } 3927 3928 /* Silently round up size */ 3929 create_options->u.qcow2.size = ROUND_UP(create_options->u.qcow2.size, 3930 BDRV_SECTOR_SIZE); 3931 3932 /* Create the qcow2 image (format layer) */ 3933 ret = qcow2_co_create(create_options, errp); 3934 finish: 3935 if (ret < 0) { 3936 bdrv_co_delete_file_noerr(bs); 3937 bdrv_co_delete_file_noerr(data_bs); 3938 } else { 3939 ret = 0; 3940 } 3941 3942 qobject_unref(qdict); 3943 bdrv_co_unref(bs); 3944 bdrv_co_unref(data_bs); 3945 qapi_free_BlockdevCreateOptions(create_options); 3946 return ret; 3947 } 3948 3949 3950 static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes) 3951 { 3952 int64_t nr; 3953 int res; 3954 3955 /* Clamp to image length, before checking status of underlying sectors */ 3956 if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) { 3957 bytes = bs->total_sectors * BDRV_SECTOR_SIZE - offset; 3958 } 3959 3960 if (!bytes) { 3961 return true; 3962 } 3963 3964 /* 3965 * bdrv_block_status_above doesn't merge different types of zeros, for 3966 * example, zeros which come from the region which is unallocated in 3967 * the whole backing chain, and zeros which come because of a short 3968 * backing file. So, we need a loop. 3969 */ 3970 do { 3971 res = bdrv_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL); 3972 offset += nr; 3973 bytes -= nr; 3974 } while (res >= 0 && (res & BDRV_BLOCK_ZERO) && nr && bytes); 3975 3976 return res >= 0 && (res & BDRV_BLOCK_ZERO) && bytes == 0; 3977 } 3978 3979 static int coroutine_fn GRAPH_RDLOCK 3980 qcow2_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes, 3981 BdrvRequestFlags flags) 3982 { 3983 int ret; 3984 BDRVQcow2State *s = bs->opaque; 3985 3986 uint32_t head = offset_into_subcluster(s, offset); 3987 uint32_t tail = ROUND_UP(offset + bytes, s->subcluster_size) - 3988 (offset + bytes); 3989 3990 trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, bytes); 3991 if (offset + bytes == bs->total_sectors * BDRV_SECTOR_SIZE) { 3992 tail = 0; 3993 } 3994 3995 if (head || tail) { 3996 uint64_t off; 3997 unsigned int nr; 3998 QCow2SubclusterType type; 3999 4000 assert(head + bytes + tail <= s->subcluster_size); 4001 4002 /* check whether remainder of cluster already reads as zero */ 4003 if (!(is_zero(bs, offset - head, head) && 4004 is_zero(bs, offset + bytes, tail))) { 4005 return -ENOTSUP; 4006 } 4007 4008 qemu_co_mutex_lock(&s->lock); 4009 /* We can have new write after previous check */ 4010 offset -= head; 4011 bytes = s->subcluster_size; 4012 nr = s->subcluster_size; 4013 ret = qcow2_get_host_offset(bs, offset, &nr, &off, &type); 4014 if (ret < 0 || 4015 (type != QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN && 4016 type != QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC && 4017 type != QCOW2_SUBCLUSTER_ZERO_PLAIN && 4018 type != QCOW2_SUBCLUSTER_ZERO_ALLOC)) { 4019 qemu_co_mutex_unlock(&s->lock); 4020 return ret < 0 ? 
ret : -ENOTSUP; 4021 } 4022 } else { 4023 qemu_co_mutex_lock(&s->lock); 4024 } 4025 4026 trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, bytes); 4027 4028 /* Whatever is left can use real zero subclusters */ 4029 ret = qcow2_subcluster_zeroize(bs, offset, bytes, flags); 4030 qemu_co_mutex_unlock(&s->lock); 4031 4032 return ret; 4033 } 4034 4035 static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs, 4036 int64_t offset, int64_t bytes) 4037 { 4038 int ret; 4039 BDRVQcow2State *s = bs->opaque; 4040 4041 /* If the image does not support QCOW_OFLAG_ZERO then discarding 4042 * clusters could expose stale data from the backing file. */ 4043 if (s->qcow_version < 3 && bs->backing) { 4044 return -ENOTSUP; 4045 } 4046 4047 if (!QEMU_IS_ALIGNED(offset | bytes, s->cluster_size)) { 4048 assert(bytes < s->cluster_size); 4049 /* Ignore partial clusters, except for the special case of the 4050 * complete partial cluster at the end of an unaligned file */ 4051 if (!QEMU_IS_ALIGNED(offset, s->cluster_size) || 4052 offset + bytes != bs->total_sectors * BDRV_SECTOR_SIZE) { 4053 return -ENOTSUP; 4054 } 4055 } 4056 4057 qemu_co_mutex_lock(&s->lock); 4058 ret = qcow2_cluster_discard(bs, offset, bytes, QCOW2_DISCARD_REQUEST, 4059 false); 4060 qemu_co_mutex_unlock(&s->lock); 4061 return ret; 4062 } 4063 4064 static int coroutine_fn GRAPH_RDLOCK 4065 qcow2_co_copy_range_from(BlockDriverState *bs, 4066 BdrvChild *src, int64_t src_offset, 4067 BdrvChild *dst, int64_t dst_offset, 4068 int64_t bytes, BdrvRequestFlags read_flags, 4069 BdrvRequestFlags write_flags) 4070 { 4071 BDRVQcow2State *s = bs->opaque; 4072 int ret; 4073 unsigned int cur_bytes; /* number of bytes in current iteration */ 4074 BdrvChild *child = NULL; 4075 BdrvRequestFlags cur_write_flags; 4076 4077 assert(!bs->encrypted); 4078 qemu_co_mutex_lock(&s->lock); 4079 4080 while (bytes != 0) { 4081 uint64_t copy_offset = 0; 4082 QCow2SubclusterType type; 4083 /* prepare next request */ 4084 cur_bytes = MIN(bytes, INT_MAX); 4085 cur_write_flags = write_flags; 4086 4087 ret = qcow2_get_host_offset(bs, src_offset, &cur_bytes, 4088 &copy_offset, &type); 4089 if (ret < 0) { 4090 goto out; 4091 } 4092 4093 switch (type) { 4094 case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN: 4095 case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC: 4096 if (bs->backing && bs->backing->bs) { 4097 int64_t backing_length = bdrv_co_getlength(bs->backing->bs); 4098 if (src_offset >= backing_length) { 4099 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 4100 } else { 4101 child = bs->backing; 4102 cur_bytes = MIN(cur_bytes, backing_length - src_offset); 4103 copy_offset = src_offset; 4104 } 4105 } else { 4106 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 4107 } 4108 break; 4109 4110 case QCOW2_SUBCLUSTER_ZERO_PLAIN: 4111 case QCOW2_SUBCLUSTER_ZERO_ALLOC: 4112 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 4113 break; 4114 4115 case QCOW2_SUBCLUSTER_COMPRESSED: 4116 ret = -ENOTSUP; 4117 goto out; 4118 4119 case QCOW2_SUBCLUSTER_NORMAL: 4120 child = s->data_file; 4121 break; 4122 4123 default: 4124 abort(); 4125 } 4126 qemu_co_mutex_unlock(&s->lock); 4127 ret = bdrv_co_copy_range_from(child, 4128 copy_offset, 4129 dst, dst_offset, 4130 cur_bytes, read_flags, cur_write_flags); 4131 qemu_co_mutex_lock(&s->lock); 4132 if (ret < 0) { 4133 goto out; 4134 } 4135 4136 bytes -= cur_bytes; 4137 src_offset += cur_bytes; 4138 dst_offset += cur_bytes; 4139 } 4140 ret = 0; 4141 4142 out: 4143 qemu_co_mutex_unlock(&s->lock); 4144 return ret; 4145 } 4146 4147 static int coroutine_fn GRAPH_RDLOCK 4148
qcow2_co_copy_range_to(BlockDriverState *bs, 4149 BdrvChild *src, int64_t src_offset, 4150 BdrvChild *dst, int64_t dst_offset, 4151 int64_t bytes, BdrvRequestFlags read_flags, 4152 BdrvRequestFlags write_flags) 4153 { 4154 BDRVQcow2State *s = bs->opaque; 4155 int ret; 4156 unsigned int cur_bytes; /* number of bytes in current iteration */ 4157 uint64_t host_offset; 4158 QCowL2Meta *l2meta = NULL; 4159 4160 assert(!bs->encrypted); 4161 4162 qemu_co_mutex_lock(&s->lock); 4163 4164 while (bytes != 0) { 4165 4166 l2meta = NULL; 4167 4168 cur_bytes = MIN(bytes, INT_MAX); 4169 4170 /* TODO: 4171 * If src->bs == dst->bs, we could simply copy by incrementing 4172 * the refcnt, without copying user data. 4173 * Or if src->bs == dst->bs->backing->bs, we could copy by discarding. */ 4174 ret = qcow2_alloc_host_offset(bs, dst_offset, &cur_bytes, 4175 &host_offset, &l2meta); 4176 if (ret < 0) { 4177 goto fail; 4178 } 4179 4180 ret = qcow2_pre_write_overlap_check(bs, 0, host_offset, cur_bytes, 4181 true); 4182 if (ret < 0) { 4183 goto fail; 4184 } 4185 4186 qemu_co_mutex_unlock(&s->lock); 4187 ret = bdrv_co_copy_range_to(src, src_offset, s->data_file, host_offset, 4188 cur_bytes, read_flags, write_flags); 4189 qemu_co_mutex_lock(&s->lock); 4190 if (ret < 0) { 4191 goto fail; 4192 } 4193 4194 ret = qcow2_handle_l2meta(bs, &l2meta, true); 4195 if (ret) { 4196 goto fail; 4197 } 4198 4199 bytes -= cur_bytes; 4200 src_offset += cur_bytes; 4201 dst_offset += cur_bytes; 4202 } 4203 ret = 0; 4204 4205 fail: 4206 qcow2_handle_l2meta(bs, &l2meta, false); 4207 4208 qemu_co_mutex_unlock(&s->lock); 4209 4210 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); 4211 4212 return ret; 4213 } 4214 4215 static int coroutine_fn GRAPH_RDLOCK 4216 qcow2_co_truncate(BlockDriverState *bs, int64_t offset, bool exact, 4217 PreallocMode prealloc, BdrvRequestFlags flags, Error **errp) 4218 { 4219 BDRVQcow2State *s = bs->opaque; 4220 uint64_t old_length; 4221 int64_t new_l1_size; 4222 int ret; 4223 QDict *options; 4224 4225 if (prealloc != PREALLOC_MODE_OFF && prealloc != PREALLOC_MODE_METADATA && 4226 prealloc != PREALLOC_MODE_FALLOC && prealloc != PREALLOC_MODE_FULL) 4227 { 4228 error_setg(errp, "Unsupported preallocation mode '%s'", 4229 PreallocMode_str(prealloc)); 4230 return -ENOTSUP; 4231 } 4232 4233 if (!QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)) { 4234 error_setg(errp, "The new size must be a multiple of %u", 4235 (unsigned) BDRV_SECTOR_SIZE); 4236 return -EINVAL; 4237 } 4238 4239 qemu_co_mutex_lock(&s->lock); 4240 4241 /* 4242 * Even though we store snapshot size for all images, it was not 4243 * required until v3, so it is not safe to proceed for v2. 4244 */ 4245 if (s->nb_snapshots && s->qcow_version < 3) { 4246 error_setg(errp, "Can't resize a v2 image which has snapshots"); 4247 ret = -ENOTSUP; 4248 goto fail; 4249 } 4250 4251 /* See qcow2-bitmap.c for which bitmap scenarios prevent a resize.
*/ 4252 if (qcow2_truncate_bitmaps_check(bs, errp)) { 4253 ret = -ENOTSUP; 4254 goto fail; 4255 } 4256 4257 old_length = bs->total_sectors * BDRV_SECTOR_SIZE; 4258 new_l1_size = size_to_l1(s, offset); 4259 4260 if (offset < old_length) { 4261 int64_t last_cluster, old_file_size; 4262 if (prealloc != PREALLOC_MODE_OFF) { 4263 error_setg(errp, 4264 "Preallocation can't be used for shrinking an image"); 4265 ret = -EINVAL; 4266 goto fail; 4267 } 4268 4269 ret = qcow2_cluster_discard(bs, ROUND_UP(offset, s->cluster_size), 4270 old_length - ROUND_UP(offset, 4271 s->cluster_size), 4272 QCOW2_DISCARD_ALWAYS, true); 4273 if (ret < 0) { 4274 error_setg_errno(errp, -ret, "Failed to discard cropped clusters"); 4275 goto fail; 4276 } 4277 4278 ret = qcow2_shrink_l1_table(bs, new_l1_size); 4279 if (ret < 0) { 4280 error_setg_errno(errp, -ret, 4281 "Failed to reduce the number of L2 tables"); 4282 goto fail; 4283 } 4284 4285 ret = qcow2_shrink_reftable(bs); 4286 if (ret < 0) { 4287 error_setg_errno(errp, -ret, 4288 "Failed to discard unused refblocks"); 4289 goto fail; 4290 } 4291 4292 old_file_size = bdrv_co_getlength(bs->file->bs); 4293 if (old_file_size < 0) { 4294 error_setg_errno(errp, -old_file_size, 4295 "Failed to inquire current file length"); 4296 ret = old_file_size; 4297 goto fail; 4298 } 4299 last_cluster = qcow2_get_last_cluster(bs, old_file_size); 4300 if (last_cluster < 0) { 4301 error_setg_errno(errp, -last_cluster, 4302 "Failed to find the last cluster"); 4303 ret = last_cluster; 4304 goto fail; 4305 } 4306 if ((last_cluster + 1) * s->cluster_size < old_file_size) { 4307 Error *local_err = NULL; 4308 4309 /* 4310 * Do not pass @exact here: It will not help the user if 4311 * we get an error here just because they wanted to shrink 4312 * their qcow2 image (on a block device) with qemu-img. 4313 * (And on the qcow2 layer, the @exact requirement is 4314 * always fulfilled, so there is no need to pass it on.) 4315 */ 4316 bdrv_co_truncate(bs->file, (last_cluster + 1) * s->cluster_size, 4317 false, PREALLOC_MODE_OFF, 0, &local_err); 4318 if (local_err) { 4319 warn_reportf_err(local_err, 4320 "Failed to truncate the tail of the image: "); 4321 } 4322 } 4323 } else { 4324 ret = qcow2_grow_l1_table(bs, new_l1_size, true); 4325 if (ret < 0) { 4326 error_setg_errno(errp, -ret, "Failed to grow the L1 table"); 4327 goto fail; 4328 } 4329 4330 if (data_file_is_raw(bs) && prealloc == PREALLOC_MODE_OFF) { 4331 /* 4332 * When creating a qcow2 image with data-file-raw, we enforce 4333 * at least prealloc=metadata, so that the L1/L2 tables are 4334 * fully allocated and reading from the data file will return 4335 * the same data as reading from the qcow2 image. When the 4336 * image is grown, we must consequently preallocate the 4337 * metadata structures to cover the added area. 4338 */ 4339 prealloc = PREALLOC_MODE_METADATA; 4340 } 4341 } 4342 4343 switch (prealloc) { 4344 case PREALLOC_MODE_OFF: 4345 if (has_data_file(bs)) { 4346 /* 4347 * If the caller wants an exact resize, the external data 4348 * file should be resized to the exact target size, too, 4349 * so we pass @exact here. 
4350 */ 4351 ret = bdrv_co_truncate(s->data_file, offset, exact, prealloc, 0, 4352 errp); 4353 if (ret < 0) { 4354 goto fail; 4355 } 4356 } 4357 break; 4358 4359 case PREALLOC_MODE_METADATA: 4360 ret = preallocate_co(bs, old_length, offset, prealloc, errp); 4361 if (ret < 0) { 4362 goto fail; 4363 } 4364 break; 4365 4366 case PREALLOC_MODE_FALLOC: 4367 case PREALLOC_MODE_FULL: 4368 { 4369 int64_t allocation_start, host_offset, guest_offset; 4370 int64_t clusters_allocated; 4371 int64_t old_file_size, last_cluster, new_file_size; 4372 uint64_t nb_new_data_clusters, nb_new_l2_tables; 4373 bool subclusters_need_allocation = false; 4374 4375 /* With a data file, preallocation means just allocating the metadata 4376 * and forwarding the truncate request to the data file */ 4377 if (has_data_file(bs)) { 4378 ret = preallocate_co(bs, old_length, offset, prealloc, errp); 4379 if (ret < 0) { 4380 goto fail; 4381 } 4382 break; 4383 } 4384 4385 old_file_size = bdrv_co_getlength(bs->file->bs); 4386 if (old_file_size < 0) { 4387 error_setg_errno(errp, -old_file_size, 4388 "Failed to inquire current file length"); 4389 ret = old_file_size; 4390 goto fail; 4391 } 4392 4393 last_cluster = qcow2_get_last_cluster(bs, old_file_size); 4394 if (last_cluster >= 0) { 4395 old_file_size = (last_cluster + 1) * s->cluster_size; 4396 } else { 4397 old_file_size = ROUND_UP(old_file_size, s->cluster_size); 4398 } 4399 4400 nb_new_data_clusters = (ROUND_UP(offset, s->cluster_size) - 4401 start_of_cluster(s, old_length)) >> s->cluster_bits; 4402 4403 /* This is an overestimation; we will not actually allocate space for 4404 * these in the file but just make sure the new refcount structures are 4405 * able to cover them so we will not have to allocate new refblocks 4406 * while entering the data blocks in the potentially new L2 tables. 4407 * (We do not actually care where the L2 tables are placed. Maybe they 4408 * are already allocated or they can be placed somewhere before 4409 * @old_file_size. It does not matter because they will be fully 4410 * allocated automatically, so they do not need to be covered by the 4411 * preallocation. All that matters is that we will not have to allocate 4412 * new refcount structures for them.) */ 4413 nb_new_l2_tables = DIV_ROUND_UP(nb_new_data_clusters, 4414 s->cluster_size / l2_entry_size(s)); 4415 /* The cluster range may not be aligned to L2 boundaries, so add one L2 4416 * table for a potential head/tail */ 4417 nb_new_l2_tables++; 4418 4419 allocation_start = qcow2_refcount_area(bs, old_file_size, 4420 nb_new_data_clusters + 4421 nb_new_l2_tables, 4422 true, 0, 0); 4423 if (allocation_start < 0) { 4424 error_setg_errno(errp, -allocation_start, 4425 "Failed to resize refcount structures"); 4426 ret = allocation_start; 4427 goto fail; 4428 } 4429 4430 clusters_allocated = qcow2_alloc_clusters_at(bs, allocation_start, 4431 nb_new_data_clusters); 4432 if (clusters_allocated < 0) { 4433 error_setg_errno(errp, -clusters_allocated, 4434 "Failed to allocate data clusters"); 4435 ret = clusters_allocated; 4436 goto fail; 4437 } 4438 4439 assert(clusters_allocated == nb_new_data_clusters); 4440 4441 /* Allocate the data area */ 4442 new_file_size = allocation_start + 4443 nb_new_data_clusters * s->cluster_size; 4444 /* 4445 * Image file grows, so @exact does not matter. 4446 * 4447 * If we need to zero out the new area, try first whether the protocol 4448 * driver can already take care of this. 
4449 */ 4450 if (flags & BDRV_REQ_ZERO_WRITE) { 4451 ret = bdrv_co_truncate(bs->file, new_file_size, false, prealloc, 4452 BDRV_REQ_ZERO_WRITE, NULL); 4453 if (ret >= 0) { 4454 flags &= ~BDRV_REQ_ZERO_WRITE; 4455 /* Ensure that we read zeroes and not backing file data */ 4456 subclusters_need_allocation = true; 4457 } 4458 } else { 4459 ret = -1; 4460 } 4461 if (ret < 0) { 4462 ret = bdrv_co_truncate(bs->file, new_file_size, false, prealloc, 0, 4463 errp); 4464 } 4465 if (ret < 0) { 4466 error_prepend(errp, "Failed to resize underlying file: "); 4467 qcow2_free_clusters(bs, allocation_start, 4468 nb_new_data_clusters * s->cluster_size, 4469 QCOW2_DISCARD_OTHER); 4470 goto fail; 4471 } 4472 4473 /* Create the necessary L2 entries */ 4474 host_offset = allocation_start; 4475 guest_offset = old_length; 4476 while (nb_new_data_clusters) { 4477 int64_t nb_clusters = MIN( 4478 nb_new_data_clusters, 4479 s->l2_slice_size - offset_to_l2_slice_index(s, guest_offset)); 4480 unsigned cow_start_length = offset_into_cluster(s, guest_offset); 4481 QCowL2Meta allocation; 4482 guest_offset = start_of_cluster(s, guest_offset); 4483 allocation = (QCowL2Meta) { 4484 .offset = guest_offset, 4485 .alloc_offset = host_offset, 4486 .nb_clusters = nb_clusters, 4487 .cow_start = { 4488 .offset = 0, 4489 .nb_bytes = cow_start_length, 4490 }, 4491 .cow_end = { 4492 .offset = nb_clusters << s->cluster_bits, 4493 .nb_bytes = 0, 4494 }, 4495 .prealloc = !subclusters_need_allocation, 4496 }; 4497 qemu_co_queue_init(&allocation.dependent_requests); 4498 4499 ret = qcow2_alloc_cluster_link_l2(bs, &allocation); 4500 if (ret < 0) { 4501 error_setg_errno(errp, -ret, "Failed to update L2 tables"); 4502 qcow2_free_clusters(bs, host_offset, 4503 nb_new_data_clusters * s->cluster_size, 4504 QCOW2_DISCARD_OTHER); 4505 goto fail; 4506 } 4507 4508 guest_offset += nb_clusters * s->cluster_size; 4509 host_offset += nb_clusters * s->cluster_size; 4510 nb_new_data_clusters -= nb_clusters; 4511 } 4512 break; 4513 } 4514 4515 default: 4516 g_assert_not_reached(); 4517 } 4518 4519 if ((flags & BDRV_REQ_ZERO_WRITE) && offset > old_length) { 4520 uint64_t zero_start = QEMU_ALIGN_UP(old_length, s->subcluster_size); 4521 4522 /* 4523 * Use zero clusters as much as we can. qcow2_subcluster_zeroize() 4524 * requires a subcluster-aligned start. The end may be unaligned if 4525 * it is at the end of the image (which it is here). 
4526 */ 4527 if (offset > zero_start) { 4528 ret = qcow2_subcluster_zeroize(bs, zero_start, offset - zero_start, 4529 0); 4530 if (ret < 0) { 4531 error_setg_errno(errp, -ret, "Failed to zero out new clusters"); 4532 goto fail; 4533 } 4534 } 4535 4536 /* Write explicit zeros for the unaligned head */ 4537 if (zero_start > old_length) { 4538 uint64_t len = MIN(zero_start, offset) - old_length; 4539 uint8_t *buf = qemu_blockalign0(bs, len); 4540 QEMUIOVector qiov; 4541 qemu_iovec_init_buf(&qiov, buf, len); 4542 4543 qemu_co_mutex_unlock(&s->lock); 4544 ret = qcow2_co_pwritev_part(bs, old_length, len, &qiov, 0, 0); 4545 qemu_co_mutex_lock(&s->lock); 4546 4547 qemu_vfree(buf); 4548 if (ret < 0) { 4549 error_setg_errno(errp, -ret, "Failed to zero out the new area"); 4550 goto fail; 4551 } 4552 } 4553 } 4554 4555 if (prealloc != PREALLOC_MODE_OFF) { 4556 /* Flush metadata before actually changing the image size */ 4557 ret = qcow2_write_caches(bs); 4558 if (ret < 0) { 4559 error_setg_errno(errp, -ret, 4560 "Failed to flush the preallocated area to disk"); 4561 goto fail; 4562 } 4563 } 4564 4565 bs->total_sectors = offset / BDRV_SECTOR_SIZE; 4566 4567 /* write updated header.size */ 4568 offset = cpu_to_be64(offset); 4569 ret = bdrv_co_pwrite_sync(bs->file, offsetof(QCowHeader, size), 4570 sizeof(offset), &offset, 0); 4571 if (ret < 0) { 4572 error_setg_errno(errp, -ret, "Failed to update the image size"); 4573 goto fail; 4574 } 4575 4576 s->l1_vm_state_index = new_l1_size; 4577 4578 /* Update cache sizes */ 4579 options = qdict_clone_shallow(bs->options); 4580 ret = qcow2_update_options(bs, options, s->flags, errp); 4581 qobject_unref(options); 4582 if (ret < 0) { 4583 goto fail; 4584 } 4585 ret = 0; 4586 fail: 4587 qemu_co_mutex_unlock(&s->lock); 4588 return ret; 4589 } 4590 4591 static int coroutine_fn GRAPH_RDLOCK 4592 qcow2_co_pwritev_compressed_task(BlockDriverState *bs, 4593 uint64_t offset, uint64_t bytes, 4594 QEMUIOVector *qiov, size_t qiov_offset) 4595 { 4596 BDRVQcow2State *s = bs->opaque; 4597 int ret; 4598 ssize_t out_len; 4599 uint8_t *buf, *out_buf; 4600 uint64_t cluster_offset; 4601 4602 assert(bytes == s->cluster_size || (bytes < s->cluster_size && 4603 (offset + bytes == bs->total_sectors << BDRV_SECTOR_BITS))); 4604 4605 buf = qemu_blockalign(bs, s->cluster_size); 4606 if (bytes < s->cluster_size) { 4607 /* Zero-pad last write if image size is not cluster aligned */ 4608 memset(buf + bytes, 0, s->cluster_size - bytes); 4609 } 4610 qemu_iovec_to_buf(qiov, qiov_offset, buf, bytes); 4611 4612 out_buf = g_malloc(s->cluster_size); 4613 4614 out_len = qcow2_co_compress(bs, out_buf, s->cluster_size - 1, 4615 buf, s->cluster_size); 4616 if (out_len == -ENOMEM) { 4617 /* could not compress: write normal cluster */ 4618 ret = qcow2_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset, 0); 4619 if (ret < 0) { 4620 goto fail; 4621 } 4622 goto success; 4623 } else if (out_len < 0) { 4624 ret = -EINVAL; 4625 goto fail; 4626 } 4627 4628 qemu_co_mutex_lock(&s->lock); 4629 ret = qcow2_alloc_compressed_cluster_offset(bs, offset, out_len, 4630 &cluster_offset); 4631 if (ret < 0) { 4632 qemu_co_mutex_unlock(&s->lock); 4633 goto fail; 4634 } 4635 4636 ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset, out_len, true); 4637 qemu_co_mutex_unlock(&s->lock); 4638 if (ret < 0) { 4639 goto fail; 4640 } 4641 4642 BLKDBG_EVENT(s->data_file, BLKDBG_WRITE_COMPRESSED); 4643 ret = bdrv_co_pwrite(s->data_file, cluster_offset, out_len, out_buf, 0); 4644 if (ret < 0) { 4645 goto fail; 4646 } 4647 success: 
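/* Reached both after writing the compressed cluster and after falling back
 * to an ordinary uncompressed write for incompressible data. */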
4648 ret = 0; 4649 fail: 4650 qemu_vfree(buf); 4651 g_free(out_buf); 4652 return ret; 4653 } 4654 4655 /* 4656 * This function can count as GRAPH_RDLOCK because 4657 * qcow2_co_pwritev_compressed_part() holds the graph lock and keeps it until 4658 * this coroutine has terminated. 4659 */ 4660 static int coroutine_fn GRAPH_RDLOCK 4661 qcow2_co_pwritev_compressed_task_entry(AioTask *task) 4662 { 4663 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task); 4664 4665 assert(!t->subcluster_type && !t->l2meta); 4666 4667 return qcow2_co_pwritev_compressed_task(t->bs, t->offset, t->bytes, t->qiov, 4668 t->qiov_offset); 4669 } 4670 4671 /* 4672 * XXX: put compressed sectors first, then all the cluster aligned 4673 * tables to avoid losing bytes in alignment 4674 */ 4675 static int coroutine_fn GRAPH_RDLOCK 4676 qcow2_co_pwritev_compressed_part(BlockDriverState *bs, 4677 int64_t offset, int64_t bytes, 4678 QEMUIOVector *qiov, size_t qiov_offset) 4679 { 4680 BDRVQcow2State *s = bs->opaque; 4681 AioTaskPool *aio = NULL; 4682 int ret = 0; 4683 4684 if (has_data_file(bs)) { 4685 return -ENOTSUP; 4686 } 4687 4688 if (bytes == 0) { 4689 /* 4690 * align end of file to a sector boundary to ease reading with 4691 * sector based I/Os 4692 */ 4693 int64_t len = bdrv_co_getlength(bs->file->bs); 4694 if (len < 0) { 4695 return len; 4696 } 4697 return bdrv_co_truncate(bs->file, len, false, PREALLOC_MODE_OFF, 0, 4698 NULL); 4699 } 4700 4701 if (offset_into_cluster(s, offset)) { 4702 return -EINVAL; 4703 } 4704 4705 if (offset_into_cluster(s, bytes) && 4706 (offset + bytes) != (bs->total_sectors << BDRV_SECTOR_BITS)) { 4707 return -EINVAL; 4708 } 4709 4710 while (bytes && aio_task_pool_status(aio) == 0) { 4711 uint64_t chunk_size = MIN(bytes, s->cluster_size); 4712 4713 if (!aio && chunk_size != bytes) { 4714 aio = aio_task_pool_new(QCOW2_MAX_WORKERS); 4715 } 4716 4717 ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_compressed_task_entry, 4718 0, 0, offset, chunk_size, qiov, qiov_offset, NULL); 4719 if (ret < 0) { 4720 break; 4721 } 4722 qiov_offset += chunk_size; 4723 offset += chunk_size; 4724 bytes -= chunk_size; 4725 } 4726 4727 if (aio) { 4728 aio_task_pool_wait_all(aio); 4729 if (ret == 0) { 4730 ret = aio_task_pool_status(aio); 4731 } 4732 g_free(aio); 4733 } 4734 4735 return ret; 4736 } 4737 4738 static int coroutine_fn GRAPH_RDLOCK 4739 qcow2_co_preadv_compressed(BlockDriverState *bs, 4740 uint64_t l2_entry, 4741 uint64_t offset, 4742 uint64_t bytes, 4743 QEMUIOVector *qiov, 4744 size_t qiov_offset) 4745 { 4746 BDRVQcow2State *s = bs->opaque; 4747 int ret = 0, csize; 4748 uint64_t coffset; 4749 uint8_t *buf, *out_buf; 4750 int offset_in_cluster = offset_into_cluster(s, offset); 4751 4752 qcow2_parse_compressed_l2_entry(bs, l2_entry, &coffset, &csize); 4753 4754 buf = g_try_malloc(csize); 4755 if (!buf) { 4756 return -ENOMEM; 4757 } 4758 4759 out_buf = qemu_blockalign(bs, s->cluster_size); 4760 4761 BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED); 4762 ret = bdrv_co_pread(bs->file, coffset, csize, buf, 0); 4763 if (ret < 0) { 4764 goto fail; 4765 } 4766 4767 if (qcow2_co_decompress(bs, out_buf, s->cluster_size, buf, csize) < 0) { 4768 ret = -EIO; 4769 goto fail; 4770 } 4771 4772 qemu_iovec_from_buf(qiov, qiov_offset, out_buf + offset_in_cluster, bytes); 4773 4774 fail: 4775 qemu_vfree(out_buf); 4776 g_free(buf); 4777 4778 return ret; 4779 } 4780 4781 static int make_completely_empty(BlockDriverState *bs) 4782 { 4783 BDRVQcow2State *s = bs->opaque; 4784 Error *local_err = NULL; 4785 int ret, l1_clusters; 
4786 int64_t offset; 4787 uint64_t *new_reftable = NULL; 4788 uint64_t rt_entry, l1_size2; 4789 struct { 4790 uint64_t l1_offset; 4791 uint64_t reftable_offset; 4792 uint32_t reftable_clusters; 4793 } QEMU_PACKED l1_ofs_rt_ofs_cls; 4794 4795 ret = qcow2_cache_empty(bs, s->l2_table_cache); 4796 if (ret < 0) { 4797 goto fail; 4798 } 4799 4800 ret = qcow2_cache_empty(bs, s->refcount_block_cache); 4801 if (ret < 0) { 4802 goto fail; 4803 } 4804 4805 /* Refcounts will be broken utterly */ 4806 ret = qcow2_mark_dirty(bs); 4807 if (ret < 0) { 4808 goto fail; 4809 } 4810 4811 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 4812 4813 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / L1E_SIZE); 4814 l1_size2 = (uint64_t)s->l1_size * L1E_SIZE; 4815 4816 /* After this call, neither the in-memory nor the on-disk refcount 4817 * information accurately describe the actual references */ 4818 4819 ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset, 4820 l1_clusters * s->cluster_size, 0); 4821 if (ret < 0) { 4822 goto fail_broken_refcounts; 4823 } 4824 memset(s->l1_table, 0, l1_size2); 4825 4826 BLKDBG_EVENT(bs->file, BLKDBG_EMPTY_IMAGE_PREPARE); 4827 4828 /* Overwrite enough clusters at the beginning of the sectors to place 4829 * the refcount table, a refcount block and the L1 table in; this may 4830 * overwrite parts of the existing refcount and L1 table, which is not 4831 * an issue because the dirty flag is set, complete data loss is in fact 4832 * desired and partial data loss is consequently fine as well */ 4833 ret = bdrv_pwrite_zeroes(bs->file, s->cluster_size, 4834 (2 + l1_clusters) * s->cluster_size, 0); 4835 /* This call (even if it failed overall) may have overwritten on-disk 4836 * refcount structures; in that case, the in-memory refcount information 4837 * will probably differ from the on-disk information which makes the BDS 4838 * unusable */ 4839 if (ret < 0) { 4840 goto fail_broken_refcounts; 4841 } 4842 4843 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 4844 BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE); 4845 4846 /* "Create" an empty reftable (one cluster) directly after the image 4847 * header and an empty L1 table three clusters after the image header; 4848 * the cluster between those two will be used as the first refblock */ 4849 l1_ofs_rt_ofs_cls.l1_offset = cpu_to_be64(3 * s->cluster_size); 4850 l1_ofs_rt_ofs_cls.reftable_offset = cpu_to_be64(s->cluster_size); 4851 l1_ofs_rt_ofs_cls.reftable_clusters = cpu_to_be32(1); 4852 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_table_offset), 4853 sizeof(l1_ofs_rt_ofs_cls), &l1_ofs_rt_ofs_cls, 0); 4854 if (ret < 0) { 4855 goto fail_broken_refcounts; 4856 } 4857 4858 s->l1_table_offset = 3 * s->cluster_size; 4859 4860 new_reftable = g_try_new0(uint64_t, s->cluster_size / REFTABLE_ENTRY_SIZE); 4861 if (!new_reftable) { 4862 ret = -ENOMEM; 4863 goto fail_broken_refcounts; 4864 } 4865 4866 s->refcount_table_offset = s->cluster_size; 4867 s->refcount_table_size = s->cluster_size / REFTABLE_ENTRY_SIZE; 4868 s->max_refcount_table_index = 0; 4869 4870 g_free(s->refcount_table); 4871 s->refcount_table = new_reftable; 4872 new_reftable = NULL; 4873 4874 /* Now the in-memory refcount information again corresponds to the on-disk 4875 * information (reftable is empty and no refblocks (the refblock cache is 4876 * empty)); however, this means some clusters (e.g. 
the image header) are 4877 * referenced, but not refcounted, but the normal qcow2 code assumes that 4878 * the in-memory information is always correct */ 4879 4880 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC); 4881 4882 /* Enter the first refblock into the reftable */ 4883 rt_entry = cpu_to_be64(2 * s->cluster_size); 4884 ret = bdrv_pwrite_sync(bs->file, s->cluster_size, sizeof(rt_entry), 4885 &rt_entry, 0); 4886 if (ret < 0) { 4887 goto fail_broken_refcounts; 4888 } 4889 s->refcount_table[0] = 2 * s->cluster_size; 4890 4891 s->free_cluster_index = 0; 4892 assert(3 + l1_clusters <= s->refcount_block_size); 4893 offset = qcow2_alloc_clusters(bs, 3 * s->cluster_size + l1_size2); 4894 if (offset < 0) { 4895 ret = offset; 4896 goto fail_broken_refcounts; 4897 } else if (offset > 0) { 4898 error_report("First cluster in emptied image is in use"); 4899 abort(); 4900 } 4901 4902 /* Now finally the in-memory information corresponds to the on-disk 4903 * structures and is correct */ 4904 ret = qcow2_mark_clean(bs); 4905 if (ret < 0) { 4906 goto fail; 4907 } 4908 4909 ret = bdrv_truncate(bs->file, (3 + l1_clusters) * s->cluster_size, false, 4910 PREALLOC_MODE_OFF, 0, &local_err); 4911 if (ret < 0) { 4912 error_report_err(local_err); 4913 goto fail; 4914 } 4915 4916 return 0; 4917 4918 fail_broken_refcounts: 4919 /* The BDS is unusable at this point. If we wanted to make it usable, we 4920 * would have to call qcow2_refcount_close(), qcow2_refcount_init(), 4921 * qcow2_check_refcounts(), qcow2_refcount_close() and qcow2_refcount_init() 4922 * again. However, because the functions which could have caused this error 4923 * path to be taken are used by those functions as well, it's very likely 4924 * that that sequence will fail as well. Therefore, just eject the BDS. */ 4925 bs->drv = NULL; 4926 4927 fail: 4928 g_free(new_reftable); 4929 return ret; 4930 } 4931 4932 static int qcow2_make_empty(BlockDriverState *bs) 4933 { 4934 BDRVQcow2State *s = bs->opaque; 4935 uint64_t offset, end_offset; 4936 int step = QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size); 4937 int l1_clusters, ret = 0; 4938 4939 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / L1E_SIZE); 4940 4941 if (s->qcow_version >= 3 && !s->snapshots && !s->nb_bitmaps && 4942 3 + l1_clusters <= s->refcount_block_size && 4943 s->crypt_method_header != QCOW_CRYPT_LUKS && 4944 !has_data_file(bs)) { 4945 /* The following function only works for qcow2 v3 images (it 4946 * requires the dirty flag) and only as long as there are no 4947 * features that reserve extra clusters (such as snapshots, 4948 * LUKS header, or persistent bitmaps), because it completely 4949 * empties the image. Furthermore, the L1 table and three 4950 * additional clusters (image header, refcount table, one 4951 * refcount block) have to fit inside one refcount block. It 4952 * only resets the image file, i.e. does not work with an 4953 * external data file. */ 4954 return make_completely_empty(bs); 4955 } 4956 4957 /* This fallback code simply discards every active cluster; this is slow, 4958 * but works in all cases */ 4959 end_offset = bs->total_sectors * BDRV_SECTOR_SIZE; 4960 for (offset = 0; offset < end_offset; offset += step) { 4961 /* As this function is generally used after committing an external 4962 * snapshot, QCOW2_DISCARD_SNAPSHOT seems appropriate. Also, the 4963 * default action for this kind of discard is to pass the discard, 4964 * which will ideally result in an actually smaller image file, as 4965 * is probably desired. 
*/ 4966 ret = qcow2_cluster_discard(bs, offset, MIN(step, end_offset - offset), 4967 QCOW2_DISCARD_SNAPSHOT, true); 4968 if (ret < 0) { 4969 break; 4970 } 4971 } 4972 4973 return ret; 4974 } 4975 4976 static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs) 4977 { 4978 BDRVQcow2State *s = bs->opaque; 4979 int ret; 4980 4981 qemu_co_mutex_lock(&s->lock); 4982 ret = qcow2_write_caches(bs); 4983 qemu_co_mutex_unlock(&s->lock); 4984 4985 return ret; 4986 } 4987 4988 static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs, 4989 Error **errp) 4990 { 4991 Error *local_err = NULL; 4992 BlockMeasureInfo *info; 4993 uint64_t required = 0; /* bytes that contribute to required size */ 4994 uint64_t virtual_size; /* disk size as seen by guest */ 4995 uint64_t refcount_bits; 4996 uint64_t l2_tables; 4997 uint64_t luks_payload_size = 0; 4998 size_t cluster_size; 4999 int version; 5000 char *optstr; 5001 PreallocMode prealloc; 5002 bool has_backing_file; 5003 bool has_luks; 5004 bool extended_l2; 5005 size_t l2e_size; 5006 5007 /* Parse image creation options */ 5008 extended_l2 = qemu_opt_get_bool_del(opts, BLOCK_OPT_EXTL2, false); 5009 5010 cluster_size = qcow2_opt_get_cluster_size_del(opts, extended_l2, 5011 &local_err); 5012 if (local_err) { 5013 goto err; 5014 } 5015 5016 version = qcow2_opt_get_version_del(opts, &local_err); 5017 if (local_err) { 5018 goto err; 5019 } 5020 5021 refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err); 5022 if (local_err) { 5023 goto err; 5024 } 5025 5026 optstr = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); 5027 prealloc = qapi_enum_parse(&PreallocMode_lookup, optstr, 5028 PREALLOC_MODE_OFF, &local_err); 5029 g_free(optstr); 5030 if (local_err) { 5031 goto err; 5032 } 5033 5034 optstr = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); 5035 has_backing_file = !!optstr; 5036 g_free(optstr); 5037 5038 optstr = qemu_opt_get_del(opts, BLOCK_OPT_ENCRYPT_FORMAT); 5039 has_luks = optstr && strcmp(optstr, "luks") == 0; 5040 g_free(optstr); 5041 5042 if (has_luks) { 5043 g_autoptr(QCryptoBlockCreateOptions) create_opts = NULL; 5044 QDict *cryptoopts = qcow2_extract_crypto_opts(opts, "luks", errp); 5045 size_t headerlen; 5046 5047 create_opts = block_crypto_create_opts_init(cryptoopts, errp); 5048 qobject_unref(cryptoopts); 5049 if (!create_opts) { 5050 goto err; 5051 } 5052 5053 if (!qcrypto_block_calculate_payload_offset(create_opts, 5054 "encrypt.", 5055 &headerlen, 5056 &local_err)) { 5057 goto err; 5058 } 5059 5060 luks_payload_size = ROUND_UP(headerlen, cluster_size); 5061 } 5062 5063 virtual_size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0); 5064 virtual_size = ROUND_UP(virtual_size, cluster_size); 5065 5066 /* Check that virtual disk size is valid */ 5067 l2e_size = extended_l2 ? L2E_SIZE_EXTENDED : L2E_SIZE_NORMAL; 5068 l2_tables = DIV_ROUND_UP(virtual_size / cluster_size, 5069 cluster_size / l2e_size); 5070 if (l2_tables * L1E_SIZE > QCOW_MAX_L1_SIZE) { 5071 error_setg(&local_err, "The image size is too large " 5072 "(try using a larger cluster size)"); 5073 goto err; 5074 } 5075 5076 /* Account for input image */ 5077 if (in_bs) { 5078 int64_t ssize = bdrv_getlength(in_bs); 5079 if (ssize < 0) { 5080 error_setg_errno(&local_err, -ssize, 5081 "Unable to get image virtual_size"); 5082 goto err; 5083 } 5084 5085 virtual_size = ROUND_UP(ssize, cluster_size); 5086 5087 if (has_backing_file) { 5088 /* We don't know how much of the backing chain is shared by the input 5089 * image and the new image file.
In the worst case the new image's 5090 * backing file has nothing in common with the input image. Be 5091 * conservative and assume all clusters need to be written. 5092 */ 5093 required = virtual_size; 5094 } else { 5095 int64_t offset; 5096 int64_t pnum = 0; 5097 5098 for (offset = 0; offset < ssize; offset += pnum) { 5099 int ret; 5100 5101 ret = bdrv_block_status_above(in_bs, NULL, offset, 5102 ssize - offset, &pnum, NULL, 5103 NULL); 5104 if (ret < 0) { 5105 error_setg_errno(&local_err, -ret, 5106 "Unable to get block status"); 5107 goto err; 5108 } 5109 5110 if (ret & BDRV_BLOCK_ZERO) { 5111 /* Skip zero regions (safe with no backing file) */ 5112 } else if ((ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) == 5113 (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) { 5114 /* Extend pnum to end of cluster for next iteration */ 5115 pnum = ROUND_UP(offset + pnum, cluster_size) - offset; 5116 5117 /* Count clusters we've seen */ 5118 required += offset % cluster_size + pnum; 5119 } 5120 } 5121 } 5122 } 5123 5124 /* Take into account preallocation. Nothing special is needed for 5125 * PREALLOC_MODE_METADATA since metadata is always counted. 5126 */ 5127 if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) { 5128 required = virtual_size; 5129 } 5130 5131 info = g_new0(BlockMeasureInfo, 1); 5132 info->fully_allocated = luks_payload_size + 5133 qcow2_calc_prealloc_size(virtual_size, cluster_size, 5134 ctz32(refcount_bits), extended_l2); 5135 5136 /* 5137 * Remove data clusters that are not required. This overestimates the 5138 * required size because metadata needed for the fully allocated file is 5139 * still counted. Show bitmaps only if both source and destination 5140 * would support them. 5141 */ 5142 info->required = info->fully_allocated - virtual_size + required; 5143 info->has_bitmaps = version >= 3 && in_bs && 5144 bdrv_supports_persistent_dirty_bitmap(in_bs); 5145 if (info->has_bitmaps) { 5146 info->bitmaps = qcow2_get_persistent_dirty_bitmap_size(in_bs, 5147 cluster_size); 5148 } 5149 return info; 5150 5151 err: 5152 error_propagate(errp, local_err); 5153 return NULL; 5154 } 5155 5156 static int coroutine_fn 5157 qcow2_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) 5158 { 5159 BDRVQcow2State *s = bs->opaque; 5160 bdi->cluster_size = s->cluster_size; 5161 bdi->vm_state_offset = qcow2_vm_state_offset(s); 5162 bdi->is_dirty = s->incompatible_features & QCOW2_INCOMPAT_DIRTY; 5163 return 0; 5164 } 5165 5166 static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs, 5167 Error **errp) 5168 { 5169 BDRVQcow2State *s = bs->opaque; 5170 ImageInfoSpecific *spec_info; 5171 QCryptoBlockInfo *encrypt_info = NULL; 5172 5173 if (s->crypto != NULL) { 5174 encrypt_info = qcrypto_block_get_info(s->crypto, errp); 5175 if (!encrypt_info) { 5176 return NULL; 5177 } 5178 } 5179 5180 spec_info = g_new(ImageInfoSpecific, 1); 5181 *spec_info = (ImageInfoSpecific){ 5182 .type = IMAGE_INFO_SPECIFIC_KIND_QCOW2, 5183 .u.qcow2.data = g_new0(ImageInfoSpecificQCow2, 1), 5184 }; 5185 if (s->qcow_version == 2) { 5186 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 5187 .compat = g_strdup("0.10"), 5188 .refcount_bits = s->refcount_bits, 5189 }; 5190 } else if (s->qcow_version == 3) { 5191 Qcow2BitmapInfoList *bitmaps; 5192 if (!qcow2_get_bitmap_info_list(bs, &bitmaps, errp)) { 5193 qapi_free_ImageInfoSpecific(spec_info); 5194 qapi_free_QCryptoBlockInfo(encrypt_info); 5195 return NULL; 5196 } 5197 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 5198 .compat = 
g_strdup("1.1"), 5199 .lazy_refcounts = s->compatible_features & 5200 QCOW2_COMPAT_LAZY_REFCOUNTS, 5201 .has_lazy_refcounts = true, 5202 .corrupt = s->incompatible_features & 5203 QCOW2_INCOMPAT_CORRUPT, 5204 .has_corrupt = true, 5205 .has_extended_l2 = true, 5206 .extended_l2 = has_subclusters(s), 5207 .refcount_bits = s->refcount_bits, 5208 .has_bitmaps = !!bitmaps, 5209 .bitmaps = bitmaps, 5210 .data_file = g_strdup(s->image_data_file), 5211 .has_data_file_raw = has_data_file(bs), 5212 .data_file_raw = data_file_is_raw(bs), 5213 .compression_type = s->compression_type, 5214 }; 5215 } else { 5216 /* if this assertion fails, this probably means a new version was 5217 * added without having it covered here */ 5218 assert(false); 5219 } 5220 5221 if (encrypt_info) { 5222 ImageInfoSpecificQCow2Encryption *qencrypt = 5223 g_new(ImageInfoSpecificQCow2Encryption, 1); 5224 switch (encrypt_info->format) { 5225 case Q_CRYPTO_BLOCK_FORMAT_QCOW: 5226 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_AES; 5227 break; 5228 case Q_CRYPTO_BLOCK_FORMAT_LUKS: 5229 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_LUKS; 5230 qencrypt->u.luks = encrypt_info->u.luks; 5231 break; 5232 default: 5233 abort(); 5234 } 5235 /* Since we did shallow copy above, erase any pointers 5236 * in the original info */ 5237 memset(&encrypt_info->u, 0, sizeof(encrypt_info->u)); 5238 qapi_free_QCryptoBlockInfo(encrypt_info); 5239 5240 spec_info->u.qcow2.data->encrypt = qencrypt; 5241 } 5242 5243 return spec_info; 5244 } 5245 5246 static int qcow2_has_zero_init(BlockDriverState *bs) 5247 { 5248 BDRVQcow2State *s = bs->opaque; 5249 bool preallocated; 5250 5251 if (qemu_in_coroutine()) { 5252 qemu_co_mutex_lock(&s->lock); 5253 } 5254 /* 5255 * Check preallocation status: Preallocated images have all L2 5256 * tables allocated, nonpreallocated images have none. It is 5257 * therefore enough to check the first one. 5258 */ 5259 preallocated = s->l1_size > 0 && s->l1_table[0] != 0; 5260 if (qemu_in_coroutine()) { 5261 qemu_co_mutex_unlock(&s->lock); 5262 } 5263 5264 if (!preallocated) { 5265 return 1; 5266 } else if (bs->encrypted) { 5267 return 0; 5268 } else { 5269 return bdrv_has_zero_init(s->data_file->bs); 5270 } 5271 } 5272 5273 /* 5274 * Check the request to vmstate. 
On success return 5275 * qcow2_vm_state_offset(bs) + @pos 5276 */ 5277 static int64_t qcow2_check_vmstate_request(BlockDriverState *bs, 5278 QEMUIOVector *qiov, int64_t pos) 5279 { 5280 BDRVQcow2State *s = bs->opaque; 5281 int64_t vmstate_offset = qcow2_vm_state_offset(s); 5282 int ret; 5283 5284 /* Incoming requests must be OK */ 5285 bdrv_check_qiov_request(pos, qiov->size, qiov, 0, &error_abort); 5286 5287 if (INT64_MAX - pos < vmstate_offset) { 5288 return -EIO; 5289 } 5290 5291 pos += vmstate_offset; 5292 ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); 5293 if (ret < 0) { 5294 return ret; 5295 } 5296 5297 return pos; 5298 } 5299 5300 static int coroutine_fn GRAPH_RDLOCK 5301 qcow2_co_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 5302 { 5303 int64_t offset = qcow2_check_vmstate_request(bs, qiov, pos); 5304 if (offset < 0) { 5305 return offset; 5306 } 5307 5308 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE); 5309 return bs->drv->bdrv_co_pwritev_part(bs, offset, qiov->size, qiov, 0, 0); 5310 } 5311 5312 static int coroutine_fn GRAPH_RDLOCK 5313 qcow2_co_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) 5314 { 5315 int64_t offset = qcow2_check_vmstate_request(bs, qiov, pos); 5316 if (offset < 0) { 5317 return offset; 5318 } 5319 5320 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD); 5321 return bs->drv->bdrv_co_preadv_part(bs, offset, qiov->size, qiov, 0, 0); 5322 } 5323 5324 static int qcow2_has_compressed_clusters(BlockDriverState *bs) 5325 { 5326 int64_t offset = 0; 5327 int64_t bytes = bdrv_getlength(bs); 5328 5329 if (bytes < 0) { 5330 return bytes; 5331 } 5332 5333 while (bytes != 0) { 5334 int ret; 5335 QCow2SubclusterType type; 5336 unsigned int cur_bytes = MIN(INT_MAX, bytes); 5337 uint64_t host_offset; 5338 5339 ret = qcow2_get_host_offset(bs, offset, &cur_bytes, &host_offset, 5340 &type); 5341 if (ret < 0) { 5342 return ret; 5343 } 5344 5345 if (type == QCOW2_SUBCLUSTER_COMPRESSED) { 5346 return 1; 5347 } 5348 5349 offset += cur_bytes; 5350 bytes -= cur_bytes; 5351 } 5352 5353 return 0; 5354 } 5355 5356 /* 5357 * Downgrades an image's version. To achieve this, any incompatible features 5358 * have to be removed. 5359 */ 5360 static int qcow2_downgrade(BlockDriverState *bs, int target_version, 5361 BlockDriverAmendStatusCB *status_cb, void *cb_opaque, 5362 Error **errp) 5363 { 5364 BDRVQcow2State *s = bs->opaque; 5365 int current_version = s->qcow_version; 5366 int ret; 5367 int i; 5368 5369 /* This is qcow2_downgrade(), not qcow2_upgrade() */ 5370 assert(target_version < current_version); 5371 5372 /* There are no other versions (now) that you can downgrade to */ 5373 assert(target_version == 2); 5374 5375 if (s->refcount_order != 4) { 5376 error_setg(errp, "compat=0.10 requires refcount_bits=16"); 5377 return -ENOTSUP; 5378 } 5379 5380 if (has_data_file(bs)) { 5381 error_setg(errp, "Cannot downgrade an image with a data file"); 5382 return -ENOTSUP; 5383 } 5384 5385 /* 5386 * If any internal snapshot has a different size than the current 5387 * image size, or VM state size that exceeds 32 bits, downgrading 5388 * is unsafe. Even though we would still use v3-compliant output 5389 * to preserve that data, other v2 programs might not realize 5390 * those optional fields are important. 
5391 */ 5392 for (i = 0; i < s->nb_snapshots; i++) { 5393 if (s->snapshots[i].vm_state_size > UINT32_MAX || 5394 s->snapshots[i].disk_size != bs->total_sectors * BDRV_SECTOR_SIZE) { 5395 error_setg(errp, "Internal snapshots prevent downgrade of image"); 5396 return -ENOTSUP; 5397 } 5398 } 5399 5400 /* clear incompatible features */ 5401 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 5402 ret = qcow2_mark_clean(bs); 5403 if (ret < 0) { 5404 error_setg_errno(errp, -ret, "Failed to make the image clean"); 5405 return ret; 5406 } 5407 } 5408 5409 /* with QCOW2_INCOMPAT_CORRUPT, it is pretty much impossible to get here in 5410 * the first place; if that happens nonetheless, returning -ENOTSUP is the 5411 * best thing to do anyway */ 5412 5413 if (s->incompatible_features & ~QCOW2_INCOMPAT_COMPRESSION) { 5414 error_setg(errp, "Cannot downgrade an image with incompatible features " 5415 "0x%" PRIx64 " set", 5416 s->incompatible_features & ~QCOW2_INCOMPAT_COMPRESSION); 5417 return -ENOTSUP; 5418 } 5419 5420 /* since we can ignore compatible features, we can set them to 0 as well */ 5421 s->compatible_features = 0; 5422 /* if lazy refcounts have been used, they have already been fixed through 5423 * clearing the dirty flag */ 5424 5425 /* clearing autoclear features is trivial */ 5426 s->autoclear_features = 0; 5427 5428 ret = qcow2_expand_zero_clusters(bs, status_cb, cb_opaque); 5429 if (ret < 0) { 5430 error_setg_errno(errp, -ret, "Failed to turn zero into data clusters"); 5431 return ret; 5432 } 5433 5434 if (s->incompatible_features & QCOW2_INCOMPAT_COMPRESSION) { 5435 ret = qcow2_has_compressed_clusters(bs); 5436 if (ret < 0) { 5437 error_setg(errp, "Failed to check block status"); 5438 return -EINVAL; 5439 } 5440 if (ret) { 5441 error_setg(errp, "Cannot downgrade an image with zstd compression " 5442 "type and existing compressed clusters"); 5443 return -ENOTSUP; 5444 } 5445 /* 5446 * No compressed clusters for now, so just choose the default zlib 5447 * compression. 5448 */ 5449 s->incompatible_features &= ~QCOW2_INCOMPAT_COMPRESSION; 5450 s->compression_type = QCOW2_COMPRESSION_TYPE_ZLIB; 5451 } 5452 5453 assert(s->incompatible_features == 0); 5454 5455 s->qcow_version = target_version; 5456 ret = qcow2_update_header(bs); 5457 if (ret < 0) { 5458 s->qcow_version = current_version; 5459 error_setg_errno(errp, -ret, "Failed to update the image header"); 5460 return ret; 5461 } 5462 return 0; 5463 } 5464 5465 /* 5466 * Upgrades an image's version. While newer versions encompass all 5467 * features of older versions, some things may have to be presented 5468 * differently. 5469 */ 5470 static int qcow2_upgrade(BlockDriverState *bs, int target_version, 5471 BlockDriverAmendStatusCB *status_cb, void *cb_opaque, 5472 Error **errp) 5473 { 5474 BDRVQcow2State *s = bs->opaque; 5475 bool need_snapshot_update; 5476 int current_version = s->qcow_version; 5477 int i; 5478 int ret; 5479 5480 /* This is qcow2_upgrade(), not qcow2_downgrade() */ 5481 assert(target_version > current_version); 5482 5483 /* There are no other versions (yet) that you can upgrade to */ 5484 assert(target_version == 3); 5485 5486 status_cb(bs, 0, 2, cb_opaque); 5487 5488 /* 5489 * In v2, snapshots do not need to have extra data. v3 requires 5490 * the 64-bit VM state size and the virtual disk size to be 5491 * present. 5492 * qcow2_write_snapshots() will always write the list in the 5493 * v3-compliant format.
5494 */ 5495 need_snapshot_update = false; 5496 for (i = 0; i < s->nb_snapshots; i++) { 5497 if (s->snapshots[i].extra_data_size < 5498 sizeof_field(QCowSnapshotExtraData, vm_state_size_large) + 5499 sizeof_field(QCowSnapshotExtraData, disk_size)) 5500 { 5501 need_snapshot_update = true; 5502 break; 5503 } 5504 } 5505 if (need_snapshot_update) { 5506 ret = qcow2_write_snapshots(bs); 5507 if (ret < 0) { 5508 error_setg_errno(errp, -ret, "Failed to update the snapshot table"); 5509 return ret; 5510 } 5511 } 5512 status_cb(bs, 1, 2, cb_opaque); 5513 5514 s->qcow_version = target_version; 5515 ret = qcow2_update_header(bs); 5516 if (ret < 0) { 5517 s->qcow_version = current_version; 5518 error_setg_errno(errp, -ret, "Failed to update the image header"); 5519 return ret; 5520 } 5521 status_cb(bs, 2, 2, cb_opaque); 5522 5523 return 0; 5524 } 5525 5526 typedef enum Qcow2AmendOperation { 5527 /* This is the value Qcow2AmendHelperCBInfo::last_operation will be 5528 * statically initialized to so that the helper CB can discern the first 5529 * invocation from an operation change */ 5530 QCOW2_NO_OPERATION = 0, 5531 5532 QCOW2_UPGRADING, 5533 QCOW2_UPDATING_ENCRYPTION, 5534 QCOW2_CHANGING_REFCOUNT_ORDER, 5535 QCOW2_DOWNGRADING, 5536 } Qcow2AmendOperation; 5537 5538 typedef struct Qcow2AmendHelperCBInfo { 5539 /* The code coordinating the amend operations should only modify 5540 * these four fields; the rest will be managed by the CB */ 5541 BlockDriverAmendStatusCB *original_status_cb; 5542 void *original_cb_opaque; 5543 5544 Qcow2AmendOperation current_operation; 5545 5546 /* Total number of operations to perform (only set once) */ 5547 int total_operations; 5548 5549 /* The following fields are managed by the CB */ 5550 5551 /* Number of operations completed */ 5552 int operations_completed; 5553 5554 /* Cumulative offset of all completed operations */ 5555 int64_t offset_completed; 5556 5557 Qcow2AmendOperation last_operation; 5558 int64_t last_work_size; 5559 } Qcow2AmendHelperCBInfo; 5560 5561 static void qcow2_amend_helper_cb(BlockDriverState *bs, 5562 int64_t operation_offset, 5563 int64_t operation_work_size, void *opaque) 5564 { 5565 Qcow2AmendHelperCBInfo *info = opaque; 5566 int64_t current_work_size; 5567 int64_t projected_work_size; 5568 5569 if (info->current_operation != info->last_operation) { 5570 if (info->last_operation != QCOW2_NO_OPERATION) { 5571 info->offset_completed += info->last_work_size; 5572 info->operations_completed++; 5573 } 5574 5575 info->last_operation = info->current_operation; 5576 } 5577 5578 assert(info->total_operations > 0); 5579 assert(info->operations_completed < info->total_operations); 5580 5581 info->last_work_size = operation_work_size; 5582 5583 current_work_size = info->offset_completed + operation_work_size; 5584 5585 /* current_work_size is the total work size for (operations_completed + 1) 5586 * operations (which includes this one), so multiply it by the number of 5587 * operations not covered and divide it by the number of operations 5588 * covered to get a projection for the operations not covered */ 5589 projected_work_size = current_work_size * (info->total_operations - 5590 info->operations_completed - 1) 5591 / (info->operations_completed + 1); 5592 5593 info->original_status_cb(bs, info->offset_completed + operation_offset, 5594 current_work_size + projected_work_size, 5595 info->original_cb_opaque); 5596 } 5597 5598 static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts, 5599 BlockDriverAmendStatusCB *status_cb, 5600 void 
*cb_opaque, 5601 bool force, 5602 Error **errp) 5603 { 5604 BDRVQcow2State *s = bs->opaque; 5605 int old_version = s->qcow_version, new_version = old_version; 5606 uint64_t new_size = 0; 5607 const char *backing_file = NULL, *backing_format = NULL, *data_file = NULL; 5608 bool lazy_refcounts = s->use_lazy_refcounts; 5609 bool data_file_raw = data_file_is_raw(bs); 5610 const char *compat = NULL; 5611 int refcount_bits = s->refcount_bits; 5612 int ret; 5613 QemuOptDesc *desc = opts->list->desc; 5614 Qcow2AmendHelperCBInfo helper_cb_info; 5615 bool encryption_update = false; 5616 5617 while (desc && desc->name) { 5618 if (!qemu_opt_find(opts, desc->name)) { 5619 /* only change explicitly defined options */ 5620 desc++; 5621 continue; 5622 } 5623 5624 if (!strcmp(desc->name, BLOCK_OPT_COMPAT_LEVEL)) { 5625 compat = qemu_opt_get(opts, BLOCK_OPT_COMPAT_LEVEL); 5626 if (!compat) { 5627 /* preserve default */ 5628 } else if (!strcmp(compat, "0.10") || !strcmp(compat, "v2")) { 5629 new_version = 2; 5630 } else if (!strcmp(compat, "1.1") || !strcmp(compat, "v3")) { 5631 new_version = 3; 5632 } else { 5633 error_setg(errp, "Unknown compatibility level %s", compat); 5634 return -EINVAL; 5635 } 5636 } else if (!strcmp(desc->name, BLOCK_OPT_SIZE)) { 5637 new_size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0); 5638 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FILE)) { 5639 backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE); 5640 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FMT)) { 5641 backing_format = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT); 5642 } else if (g_str_has_prefix(desc->name, "encrypt.")) { 5643 if (!s->crypto) { 5644 error_setg(errp, 5645 "Can't amend encryption options - encryption not present"); 5646 return -EINVAL; 5647 } 5648 if (s->crypt_method_header != QCOW_CRYPT_LUKS) { 5649 error_setg(errp, 5650 "Only LUKS encryption options can be amended"); 5651 return -ENOTSUP; 5652 } 5653 encryption_update = true; 5654 } else if (!strcmp(desc->name, BLOCK_OPT_LAZY_REFCOUNTS)) { 5655 lazy_refcounts = qemu_opt_get_bool(opts, BLOCK_OPT_LAZY_REFCOUNTS, 5656 lazy_refcounts); 5657 } else if (!strcmp(desc->name, BLOCK_OPT_REFCOUNT_BITS)) { 5658 refcount_bits = qemu_opt_get_number(opts, BLOCK_OPT_REFCOUNT_BITS, 5659 refcount_bits); 5660 5661 if (refcount_bits <= 0 || refcount_bits > 64 || 5662 !is_power_of_2(refcount_bits)) 5663 { 5664 error_setg(errp, "Refcount width must be a power of two and " 5665 "may not exceed 64 bits"); 5666 return -EINVAL; 5667 } 5668 } else if (!strcmp(desc->name, BLOCK_OPT_DATA_FILE)) { 5669 data_file = qemu_opt_get(opts, BLOCK_OPT_DATA_FILE); 5670 if (data_file && !has_data_file(bs)) { 5671 error_setg(errp, "data-file can only be set for images that " 5672 "use an external data file"); 5673 return -EINVAL; 5674 } 5675 } else if (!strcmp(desc->name, BLOCK_OPT_DATA_FILE_RAW)) { 5676 data_file_raw = qemu_opt_get_bool(opts, BLOCK_OPT_DATA_FILE_RAW, 5677 data_file_raw); 5678 if (data_file_raw && !data_file_is_raw(bs)) { 5679 error_setg(errp, "data-file-raw cannot be set on existing " 5680 "images"); 5681 return -EINVAL; 5682 } 5683 } else { 5684 /* if this point is reached, this probably means a new option was 5685 * added without having it covered here */ 5686 abort(); 5687 } 5688 5689 desc++; 5690 } 5691 5692 helper_cb_info = (Qcow2AmendHelperCBInfo){ 5693 .original_status_cb = status_cb, 5694 .original_cb_opaque = cb_opaque, 5695 .total_operations = (new_version != old_version) 5696 + (s->refcount_bits != refcount_bits) + 5697 (encryption_update == true) 5698 }; 
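/* helper_cb_info lets qcow2_amend_helper_cb() fold the progress reported by
 * each individual amend step below into one continuous progress report for
 * the caller's status_cb. */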

    /* Upgrade first (some features may require compat=1.1) */
    if (new_version > old_version) {
        helper_cb_info.current_operation = QCOW2_UPGRADING;
        ret = qcow2_upgrade(bs, new_version, &qcow2_amend_helper_cb,
                            &helper_cb_info, errp);
        if (ret < 0) {
            return ret;
        }
    }

    if (encryption_update) {
        QDict *amend_opts_dict;
        QCryptoBlockAmendOptions *amend_opts;

        helper_cb_info.current_operation = QCOW2_UPDATING_ENCRYPTION;
        amend_opts_dict = qcow2_extract_crypto_opts(opts, "luks", errp);
        if (!amend_opts_dict) {
            return -EINVAL;
        }
        amend_opts = block_crypto_amend_opts_init(amend_opts_dict, errp);
        qobject_unref(amend_opts_dict);
        if (!amend_opts) {
            return -EINVAL;
        }
        ret = qcrypto_block_amend_options(s->crypto,
                                          qcow2_crypto_hdr_read_func,
                                          qcow2_crypto_hdr_write_func,
                                          bs,
                                          amend_opts,
                                          force,
                                          errp);
        qapi_free_QCryptoBlockAmendOptions(amend_opts);
        if (ret < 0) {
            return ret;
        }
    }

    if (s->refcount_bits != refcount_bits) {
        int refcount_order = ctz32(refcount_bits);

        if (new_version < 3 && refcount_bits != 16) {
            error_setg(errp, "Refcount widths other than 16 bits require "
                       "compatibility level 1.1 or above (use compat=1.1 or "
                       "greater)");
            return -EINVAL;
        }

        helper_cb_info.current_operation = QCOW2_CHANGING_REFCOUNT_ORDER;
        ret = qcow2_change_refcount_order(bs, refcount_order,
                                          &qcow2_amend_helper_cb,
                                          &helper_cb_info, errp);
        if (ret < 0) {
            return ret;
        }
    }

    /* data-file-raw blocks backing files, so clear it first if requested */
    if (data_file_raw) {
        s->autoclear_features |= QCOW2_AUTOCLEAR_DATA_FILE_RAW;
    } else {
        s->autoclear_features &= ~QCOW2_AUTOCLEAR_DATA_FILE_RAW;
    }

    if (data_file) {
        g_free(s->image_data_file);
        s->image_data_file = *data_file ?
            g_strdup(data_file) : NULL;
    }

    ret = qcow2_update_header(bs);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to update the image header");
        return ret;
    }

    if (backing_file || backing_format) {
        if (g_strcmp0(backing_file, s->image_backing_file) ||
            g_strcmp0(backing_format, s->image_backing_format)) {
            error_setg(errp, "Cannot amend the backing file");
            error_append_hint(errp,
                              "You can use 'qemu-img rebase' instead.\n");
            return -EINVAL;
        }
    }

    if (s->use_lazy_refcounts != lazy_refcounts) {
        if (lazy_refcounts) {
            if (new_version < 3) {
                error_setg(errp, "Lazy refcounts only supported with "
                           "compatibility level 1.1 and above (use compat=1.1 "
                           "or greater)");
                return -EINVAL;
            }
            s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
            ret = qcow2_update_header(bs);
            if (ret < 0) {
                s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
                error_setg_errno(errp, -ret,
                                 "Failed to update the image header");
                return ret;
            }
            s->use_lazy_refcounts = true;
        } else {
            /* make image clean first */
            ret = qcow2_mark_clean(bs);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "Failed to make the image clean");
                return ret;
            }
            /* now disallow lazy refcounts */
            s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
            ret = qcow2_update_header(bs);
            if (ret < 0) {
                s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
                error_setg_errno(errp, -ret,
                                 "Failed to update the image header");
                return ret;
            }
            s->use_lazy_refcounts = false;
        }
    }

    if (new_size) {
        BlockBackend *blk = blk_new_with_bs(bs, BLK_PERM_RESIZE, BLK_PERM_ALL,
                                            errp);
        if (!blk) {
            return -EPERM;
        }

        /*
         * Amending image options should ensure that the image has
         * exactly the given new values, so pass exact=true here.
         */
        ret = blk_truncate(blk, new_size, true, PREALLOC_MODE_OFF, 0, errp);
        blk_unref(blk);
        if (ret < 0) {
            return ret;
        }
    }

    /* Downgrade last (so unsupported features can be removed before) */
    if (new_version < old_version) {
        helper_cb_info.current_operation = QCOW2_DOWNGRADING;
        ret = qcow2_downgrade(bs, new_version, &qcow2_amend_helper_cb,
                              &helper_cb_info, errp);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

static int coroutine_fn qcow2_co_amend(BlockDriverState *bs,
                                       BlockdevAmendOptions *opts,
                                       bool force,
                                       Error **errp)
{
    BlockdevAmendOptionsQcow2 *qopts = &opts->u.qcow2;
    BDRVQcow2State *s = bs->opaque;
    int ret = 0;

    if (qopts->encrypt) {
        if (!s->crypto) {
            error_setg(errp, "image is not encrypted, can't amend");
            return -EOPNOTSUPP;
        }

        if (qopts->encrypt->format != Q_CRYPTO_BLOCK_FORMAT_LUKS) {
            error_setg(errp,
                       "Amend can't be used to change the qcow2 encryption format");
            return -EOPNOTSUPP;
        }

        if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
            error_setg(errp,
                       "Only LUKS encryption options can be amended for qcow2 with blockdev-amend");
            return -EOPNOTSUPP;
        }

        ret = qcrypto_block_amend_options(s->crypto,
                                          qcow2_crypto_hdr_read_func,
                                          qcow2_crypto_hdr_write_func,
                                          bs,
                                          qopts->encrypt,
                                          force,
                                          errp);
    }
    return ret;
}

/*
 * If offset or size are negative, respectively, they will not be included in
 * the BLOCK_IMAGE_CORRUPTED event emitted.
 * fatal will be ignored for read-only BDS; corruptions found there will always
 * be considered non-fatal.
 */
void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
                             int64_t size, const char *message_format, ...)
{
    BDRVQcow2State *s = bs->opaque;
    const char *node_name;
    char *message;
    va_list ap;

    fatal = fatal && bdrv_is_writable(bs);

    if (s->signaled_corruption &&
        (!fatal || (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT)))
    {
        return;
    }

    va_start(ap, message_format);
    message = g_strdup_vprintf(message_format, ap);
    va_end(ap);

    if (fatal) {
        fprintf(stderr, "qcow2: Marking image as corrupt: %s; further "
                "corruption events will be suppressed\n", message);
    } else {
        fprintf(stderr, "qcow2: Image is corrupt: %s; further non-fatal "
                "corruption events will be suppressed\n", message);
    }

    node_name = bdrv_get_node_name(bs);
    qapi_event_send_block_image_corrupted(bdrv_get_device_name(bs),
                                          *node_name ?
                                              node_name : NULL,
                                          message, offset >= 0, offset,
                                          size >= 0, size,
                                          fatal);
    g_free(message);

    if (fatal) {
        qcow2_mark_corrupt(bs);
        bs->drv = NULL; /* make BDS unusable */
    }

    s->signaled_corruption = true;
}

#define QCOW_COMMON_OPTIONS                                         \
    {                                                               \
        .name = BLOCK_OPT_SIZE,                                     \
        .type = QEMU_OPT_SIZE,                                      \
        .help = "Virtual disk size"                                 \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_COMPAT_LEVEL,                             \
        .type = QEMU_OPT_STRING,                                    \
        .help = "Compatibility level (v2 [0.10] or v3 [1.1])"       \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_BACKING_FILE,                             \
        .type = QEMU_OPT_STRING,                                    \
        .help = "File name of a base image"                         \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_BACKING_FMT,                              \
        .type = QEMU_OPT_STRING,                                    \
        .help = "Image format of the base image"                    \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_DATA_FILE,                                \
        .type = QEMU_OPT_STRING,                                    \
        .help = "File name of an external data file"                \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_DATA_FILE_RAW,                            \
        .type = QEMU_OPT_BOOL,                                      \
        .help = "The external data file must stay valid "           \
                "as a raw image"                                    \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_LAZY_REFCOUNTS,                           \
        .type = QEMU_OPT_BOOL,                                      \
        .help = "Postpone refcount updates",                        \
        .def_value_str = "off"                                      \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_REFCOUNT_BITS,                            \
        .type = QEMU_OPT_NUMBER,                                    \
        .help = "Width of a reference count entry in bits",         \
        .def_value_str = "16"                                       \
    }

static QemuOptsList qcow2_create_opts = {
    .name = "qcow2-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qcow2_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_ENCRYPT,
            .type = QEMU_OPT_BOOL,
            .help = "Encrypt the image with format 'aes'. (Deprecated "
                    "in favor of " BLOCK_OPT_ENCRYPT_FORMAT "=aes)",
        },
        {
            .name = BLOCK_OPT_ENCRYPT_FORMAT,
            .type = QEMU_OPT_STRING,
            .help = "Encrypt the image, format choices: 'aes', 'luks'",
        },
        BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.",
            "ID of secret providing qcow AES key or LUKS passphrase"),
        BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_ALG("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_MODE("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_ALG("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_HASH_ALG("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_HASH_ALG("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."),
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "qcow2 cluster size",
            .def_value_str = stringify(DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_EXTL2,
            .type = QEMU_OPT_BOOL,
            .help = "Extended L2 tables",
            .def_value_str = "off"
        },
        {
            .name = BLOCK_OPT_PREALLOC,
            .type = QEMU_OPT_STRING,
            .help = "Preallocation mode (allowed values: off, "
                    "metadata, falloc, full)"
        },
        {
            .name = BLOCK_OPT_COMPRESSION_TYPE,
            .type = QEMU_OPT_STRING,
            .help = "Compression method used for image cluster "
                    "compression",
            .def_value_str = "zlib"
        },
        QCOW_COMMON_OPTIONS,
        { /* end of list */ }
    }
};

static QemuOptsList qcow2_amend_opts = {
    .name = "qcow2-amend-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qcow2_amend_opts.head),
    .desc = {
        BLOCK_CRYPTO_OPT_DEF_LUKS_STATE("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_KEYSLOT("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_OLD_SECRET("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_NEW_SECRET("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."),
        QCOW_COMMON_OPTIONS,
        { /* end of list */ }
    }
};

static const char *const qcow2_strong_runtime_opts[] = {
    "encrypt." BLOCK_CRYPTO_OPT_QCOW_KEY_SECRET,

    NULL
};

BlockDriver bdrv_qcow2 = {
    .format_name                        = "qcow2",
    .instance_size                      = sizeof(BDRVQcow2State),
    .bdrv_probe                         = qcow2_probe,
    .bdrv_open                          = qcow2_open,
    .bdrv_close                         = qcow2_close,
    .bdrv_reopen_prepare                = qcow2_reopen_prepare,
    .bdrv_reopen_commit                 = qcow2_reopen_commit,
    .bdrv_reopen_commit_post            = qcow2_reopen_commit_post,
    .bdrv_reopen_abort                  = qcow2_reopen_abort,
    .bdrv_join_options                  = qcow2_join_options,
    .bdrv_child_perm                    = bdrv_default_perms,
    .bdrv_co_create_opts                = qcow2_co_create_opts,
    .bdrv_co_create                     = qcow2_co_create,
    .bdrv_has_zero_init                 = qcow2_has_zero_init,
    .bdrv_co_block_status               = qcow2_co_block_status,

    .bdrv_co_preadv_part                = qcow2_co_preadv_part,
    .bdrv_co_pwritev_part               = qcow2_co_pwritev_part,
    .bdrv_co_flush_to_os                = qcow2_co_flush_to_os,

    .bdrv_co_pwrite_zeroes              = qcow2_co_pwrite_zeroes,
    .bdrv_co_pdiscard                   = qcow2_co_pdiscard,
    .bdrv_co_copy_range_from            = qcow2_co_copy_range_from,
    .bdrv_co_copy_range_to              = qcow2_co_copy_range_to,
    .bdrv_co_truncate                   = qcow2_co_truncate,
    .bdrv_co_pwritev_compressed_part    = qcow2_co_pwritev_compressed_part,
    .bdrv_make_empty                    = qcow2_make_empty,

    .bdrv_snapshot_create               = qcow2_snapshot_create,
    .bdrv_snapshot_goto                 = qcow2_snapshot_goto,
    .bdrv_snapshot_delete               = qcow2_snapshot_delete,
    .bdrv_snapshot_list                 = qcow2_snapshot_list,
    .bdrv_snapshot_load_tmp             = qcow2_snapshot_load_tmp,
    .bdrv_measure                       = qcow2_measure,
    .bdrv_co_get_info                   = qcow2_co_get_info,
    .bdrv_get_specific_info             = qcow2_get_specific_info,

    .bdrv_co_save_vmstate               = qcow2_co_save_vmstate,
    .bdrv_co_load_vmstate               = qcow2_co_load_vmstate,

    .is_format                          = true,
    .supports_backing                   = true,
    .bdrv_change_backing_file           = qcow2_change_backing_file,

    .bdrv_refresh_limits                = qcow2_refresh_limits,
    .bdrv_co_invalidate_cache           = qcow2_co_invalidate_cache,
    .bdrv_inactivate                    = qcow2_inactivate,

    .create_opts                        = &qcow2_create_opts,
    .amend_opts                         = &qcow2_amend_opts,
    .strong_runtime_opts                = qcow2_strong_runtime_opts,
    .mutable_opts                       = mutable_opts,
    .bdrv_co_check                      = qcow2_co_check,
    .bdrv_amend_options                 = qcow2_amend_options,
    .bdrv_co_amend                      = qcow2_co_amend,

    .bdrv_detach_aio_context            = qcow2_detach_aio_context,
    .bdrv_attach_aio_context            = qcow2_attach_aio_context,

    .bdrv_supports_persistent_dirty_bitmap =
        qcow2_supports_persistent_dirty_bitmap,
    .bdrv_co_can_store_new_dirty_bitmap = qcow2_co_can_store_new_dirty_bitmap,
    .bdrv_co_remove_persistent_dirty_bitmap =
        qcow2_co_remove_persistent_dirty_bitmap,
};

static void bdrv_qcow2_init(void)
{
    bdrv_register(&bdrv_qcow2);
}

block_init(bdrv_qcow2_init);
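
/*
 * block_init() arranges for bdrv_qcow2_init() to run during QEMU's module
 * initialization, so the qcow2 driver is registered with the block layer
 * before any images are opened.
 */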