/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"

#include "block/qdict.h"
#include "sysemu/block-backend.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qcow2.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "trace.h"
#include "qemu/option_int.h"
#include "qemu/cutils.h"
#include "qemu/bswap.h"
#include "qapi/qobject-input-visitor.h"
#include "qapi/qapi-visit-block-core.h"
#include "crypto.h"
#include "block/aio_task.h"

/*
  Differences with QCOW:

  - Support for multiple incremental snapshots.
  - Memory management by reference counts.
  - Clusters which have a reference count of one have the bit
    QCOW_OFLAG_COPIED to optimize write performance.
  - Size of compressed clusters is stored in sectors to reduce bit usage
    in the cluster offsets.
  - Support for storing additional data (such as the VM state) in the
    snapshots.
  - If a backing store is used, the cluster size is not constrained
    (could be backported to QCOW).
  - L2 tables always have a size of one cluster.
 */


typedef struct {
    uint32_t magic;
    uint32_t len;
} QEMU_PACKED QCowExtension;

#define QCOW2_EXT_MAGIC_END 0
#define QCOW2_EXT_MAGIC_BACKING_FORMAT 0xE2792ACA
#define QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857
#define QCOW2_EXT_MAGIC_CRYPTO_HEADER 0x0537be77
#define QCOW2_EXT_MAGIC_BITMAPS 0x23852875
#define QCOW2_EXT_MAGIC_DATA_FILE 0x44415441

static int coroutine_fn
qcow2_co_preadv_compressed(BlockDriverState *bs,
                           uint64_t file_cluster_offset,
                           uint64_t offset,
                           uint64_t bytes,
                           QEMUIOVector *qiov,
                           size_t qiov_offset);

static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    const QCowHeader *cow_header = (const void *)buf;

    if (buf_size >= sizeof(QCowHeader) &&
        be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
        be32_to_cpu(cow_header->version) >= 2)
        return 100;
    else
        return 0;
}


static ssize_t qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset,
                                          uint8_t *buf, size_t buflen,
                                          void *opaque, Error **errp)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    ssize_t ret;

    if ((offset + buflen) > s->crypto_header.length) {
        error_setg(errp, "Request for data outside of extension header");
        return -1;
    }

    ret = bdrv_pread(bs->file,
                     s->crypto_header.offset + offset, buf, buflen);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read encryption header");
        return -1;
    }
    return ret;
}


static ssize_t qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen,
                                          void *opaque, Error **errp)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    int64_t ret;
    int64_t clusterlen;

    ret = qcow2_alloc_clusters(bs, headerlen);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Cannot allocate cluster for LUKS header size %zu",
                         headerlen);
        return -1;
    }

    s->crypto_header.length = headerlen;
    s->crypto_header.offset = ret;

    /* Zero fill remaining space in cluster so it has predictable
     * content in case of future spec changes */
    clusterlen = size_to_clusters(s, headerlen) * s->cluster_size;
    assert(qcow2_pre_write_overlap_check(bs, 0, ret, clusterlen, false) == 0);
    ret = bdrv_pwrite_zeroes(bs->file,
                             ret + headerlen,
                             clusterlen - headerlen, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not zero fill encryption header");
        return -1;
    }

    return ret;
}


static ssize_t qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset,
                                           const uint8_t *buf, size_t buflen,
                                           void *opaque, Error **errp)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    ssize_t ret;

    if ((offset + buflen) > s->crypto_header.length) {
        error_setg(errp, "Request for data outside of extension header");
        return -1;
    }

    ret = bdrv_pwrite(bs->file,
                      s->crypto_header.offset + offset, buf, buflen);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not write encryption header");
        return -1;
    }
    return ret;
}


/*
 * read qcow2 extension and fill bs
 * start reading from start_offset
 * finish reading upon magic of value 0 or when end_offset reached
 * unknown magic is skipped (future extension this version knows nothing about)
 * return 0 upon success, non-0 otherwise
 */
static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
                                 uint64_t end_offset, void **p_feature_table,
                                 int flags, bool *need_update_header,
                                 Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    QCowExtension ext;
    uint64_t offset;
    int ret;
    Qcow2BitmapHeaderExt bitmaps_ext;

    if (need_update_header != NULL) {
        *need_update_header = false;
    }

#ifdef DEBUG_EXT
    printf("qcow2_read_extensions: start=%ld end=%ld\n", start_offset, end_offset);
#endif
    offset = start_offset;
    while (offset < end_offset) {

#ifdef DEBUG_EXT
        /* Sanity check */
        if (offset > s->cluster_size)
            printf("qcow2_read_extension: suspicious offset %lu\n", offset);

        printf("attempting to read extended header in offset %lu\n", offset);
#endif

        ret = bdrv_pread(bs->file, offset, &ext, sizeof(ext));
        if (ret < 0) {
            error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: "
                             "pread fail from offset %" PRIu64, offset);
            return 1;
        }
        ext.magic = be32_to_cpu(ext.magic);
        ext.len = be32_to_cpu(ext.len);
        offset += sizeof(ext);
#ifdef DEBUG_EXT
        printf("ext.magic = 0x%x\n", ext.magic);
#endif
        if (offset > end_offset || ext.len > end_offset - offset) {
            error_setg(errp, "Header extension too large");
            return -EINVAL;
        }

        switch (ext.magic) {
        case QCOW2_EXT_MAGIC_END:
            return 0;

        case QCOW2_EXT_MAGIC_BACKING_FORMAT:
            if (ext.len >= sizeof(bs->backing_format)) {
                error_setg(errp, "ERROR: ext_backing_format: len=%" PRIu32
                           " too large (>=%zu)", ext.len,
                           sizeof(bs->backing_format));
                return 2;
            }
            ret = bdrv_pread(bs->file, offset, bs->backing_format, ext.len);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "ERROR: ext_backing_format: "
                                 "Could not read format name");
                return 3;
            }
            bs->backing_format[ext.len] = '\0';
            s->image_backing_format = g_strdup(bs->backing_format);
#ifdef DEBUG_EXT
            printf("Qcow2: Got format extension %s\n", bs->backing_format);
#endif
            break;

        case QCOW2_EXT_MAGIC_FEATURE_TABLE:
            if (p_feature_table != NULL) {
                void *feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
                ret = bdrv_pread(bs->file, offset, feature_table, ext.len);
                if (ret < 0) {
                    error_setg_errno(errp, -ret, "ERROR: ext_feature_table: "
                                     "Could not read table");
                    return ret;
                }

                *p_feature_table = feature_table;
            }
            break;

        case QCOW2_EXT_MAGIC_CRYPTO_HEADER: {
            unsigned int cflags = 0;
            if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
                error_setg(errp, "CRYPTO header extension only "
                           "expected with LUKS encryption method");
                return -EINVAL;
            }
            if (ext.len != sizeof(Qcow2CryptoHeaderExtension)) {
                error_setg(errp, "CRYPTO header extension size %u, "
                           "but expected size %zu", ext.len,
                           sizeof(Qcow2CryptoHeaderExtension));
                return -EINVAL;
            }

            ret = bdrv_pread(bs->file, offset, &s->crypto_header, ext.len);
            if (ret < 0) {
                error_setg_errno(errp, -ret,
                                 "Unable to read CRYPTO header extension");
                return ret;
            }
            s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset);
            s->crypto_header.length = be64_to_cpu(s->crypto_header.length);

            if ((s->crypto_header.offset % s->cluster_size) != 0) {
                error_setg(errp, "Encryption header offset '%" PRIu64 "' is "
                           "not a multiple of cluster size '%u'",
                           s->crypto_header.offset, s->cluster_size);
                return -EINVAL;
            }

            if (flags & BDRV_O_NO_IO) {
                cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
            }
            s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
                                           qcow2_crypto_hdr_read_func,
                                           bs, cflags, QCOW2_MAX_THREADS, errp);
            if (!s->crypto) {
                return -EINVAL;
            }
        } break;

        case QCOW2_EXT_MAGIC_BITMAPS:
            if (ext.len != sizeof(bitmaps_ext)) {
                error_setg(errp, "bitmaps_ext: "
                           "Invalid extension length");
                return -EINVAL;
            }

            if (!(s->autoclear_features & QCOW2_AUTOCLEAR_BITMAPS)) {
                if (s->qcow_version < 3) {
                    /* Let's be a bit more specific */
                    warn_report("This qcow2 v2 image contains bitmaps, but "
                                "they may have been modified by a program "
                                "without persistent bitmap support; so now "
                                "they must all be considered inconsistent");
                } else {
                    warn_report("a program lacking bitmap support "
                                "modified this file, so all bitmaps are now "
                                "considered inconsistent");
                }
                error_printf("Some clusters may be leaked, "
                             "run 'qemu-img check -r' on the image "
                             "file to fix.");
                if (need_update_header != NULL) {
                    /* Updating is needed to drop invalid bitmap extension. */
                    *need_update_header = true;
                }
                break;
            }

            ret = bdrv_pread(bs->file, offset, &bitmaps_ext, ext.len);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "bitmaps_ext: "
                                 "Could not read ext header");
                return ret;
            }

            if (bitmaps_ext.reserved32 != 0) {
                error_setg(errp, "bitmaps_ext: "
                           "Reserved field is not zero");
                return -EINVAL;
            }

            bitmaps_ext.nb_bitmaps = be32_to_cpu(bitmaps_ext.nb_bitmaps);
            bitmaps_ext.bitmap_directory_size =
                be64_to_cpu(bitmaps_ext.bitmap_directory_size);
            bitmaps_ext.bitmap_directory_offset =
                be64_to_cpu(bitmaps_ext.bitmap_directory_offset);

            if (bitmaps_ext.nb_bitmaps > QCOW2_MAX_BITMAPS) {
                error_setg(errp,
                           "bitmaps_ext: Image has %" PRIu32 " bitmaps, "
                           "exceeding the QEMU supported maximum of %d",
                           bitmaps_ext.nb_bitmaps, QCOW2_MAX_BITMAPS);
                return -EINVAL;
            }

            if (bitmaps_ext.nb_bitmaps == 0) {
                error_setg(errp, "found bitmaps extension with zero bitmaps");
                return -EINVAL;
            }

            if (bitmaps_ext.bitmap_directory_offset & (s->cluster_size - 1)) {
                error_setg(errp, "bitmaps_ext: "
                           "invalid bitmap directory offset");
                return -EINVAL;
            }

            if (bitmaps_ext.bitmap_directory_size >
                QCOW2_MAX_BITMAP_DIRECTORY_SIZE) {
                error_setg(errp, "bitmaps_ext: "
                           "bitmap directory size (%" PRIu64 ") exceeds "
                           "the maximum supported size (%d)",
                           bitmaps_ext.bitmap_directory_size,
                           QCOW2_MAX_BITMAP_DIRECTORY_SIZE);
                return -EINVAL;
            }

            s->nb_bitmaps = bitmaps_ext.nb_bitmaps;
            s->bitmap_directory_offset =
                bitmaps_ext.bitmap_directory_offset;
            s->bitmap_directory_size =
                bitmaps_ext.bitmap_directory_size;

#ifdef DEBUG_EXT
            printf("Qcow2: Got bitmaps extension: "
                   "offset=%" PRIu64 " nb_bitmaps=%" PRIu32 "\n",
                   s->bitmap_directory_offset, s->nb_bitmaps);
#endif
            break;

        case QCOW2_EXT_MAGIC_DATA_FILE:
        {
            s->image_data_file = g_malloc0(ext.len + 1);
            ret = bdrv_pread(bs->file, offset, s->image_data_file, ext.len);
            if (ret < 0) {
                error_setg_errno(errp, -ret,
                                 "ERROR: Could not read data file name");
                return ret;
            }
#ifdef DEBUG_EXT
            printf("Qcow2: Got external data file %s\n", s->image_data_file);
#endif
            break;
        }

        default:
            /* unknown magic - save it in case we need to rewrite the header */
            /* If you add a new feature, make sure to
also update the fast 417 * path of qcow2_make_empty() to deal with it. */ 418 { 419 Qcow2UnknownHeaderExtension *uext; 420 421 uext = g_malloc0(sizeof(*uext) + ext.len); 422 uext->magic = ext.magic; 423 uext->len = ext.len; 424 QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next); 425 426 ret = bdrv_pread(bs->file, offset , uext->data, uext->len); 427 if (ret < 0) { 428 error_setg_errno(errp, -ret, "ERROR: unknown extension: " 429 "Could not read data"); 430 return ret; 431 } 432 } 433 break; 434 } 435 436 offset += ((ext.len + 7) & ~7); 437 } 438 439 return 0; 440 } 441 442 static void cleanup_unknown_header_ext(BlockDriverState *bs) 443 { 444 BDRVQcow2State *s = bs->opaque; 445 Qcow2UnknownHeaderExtension *uext, *next; 446 447 QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) { 448 QLIST_REMOVE(uext, next); 449 g_free(uext); 450 } 451 } 452 453 static void report_unsupported_feature(Error **errp, Qcow2Feature *table, 454 uint64_t mask) 455 { 456 char *features = g_strdup(""); 457 char *old; 458 459 while (table && table->name[0] != '\0') { 460 if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) { 461 if (mask & (1ULL << table->bit)) { 462 old = features; 463 features = g_strdup_printf("%s%s%.46s", old, *old ? ", " : "", 464 table->name); 465 g_free(old); 466 mask &= ~(1ULL << table->bit); 467 } 468 } 469 table++; 470 } 471 472 if (mask) { 473 old = features; 474 features = g_strdup_printf("%s%sUnknown incompatible feature: %" PRIx64, 475 old, *old ? ", " : "", mask); 476 g_free(old); 477 } 478 479 error_setg(errp, "Unsupported qcow2 feature(s): %s", features); 480 g_free(features); 481 } 482 483 /* 484 * Sets the dirty bit and flushes afterwards if necessary. 485 * 486 * The incompatible_features bit is only set if the image file header was 487 * updated successfully. Therefore it is not required to check the return 488 * value of this function. 489 */ 490 int qcow2_mark_dirty(BlockDriverState *bs) 491 { 492 BDRVQcow2State *s = bs->opaque; 493 uint64_t val; 494 int ret; 495 496 assert(s->qcow_version >= 3); 497 498 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 499 return 0; /* already dirty */ 500 } 501 502 val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY); 503 ret = bdrv_pwrite(bs->file, offsetof(QCowHeader, incompatible_features), 504 &val, sizeof(val)); 505 if (ret < 0) { 506 return ret; 507 } 508 ret = bdrv_flush(bs->file->bs); 509 if (ret < 0) { 510 return ret; 511 } 512 513 /* Only treat image as dirty if the header was updated successfully */ 514 s->incompatible_features |= QCOW2_INCOMPAT_DIRTY; 515 return 0; 516 } 517 518 /* 519 * Clears the dirty bit and flushes before if necessary. Only call this 520 * function when there are no pending requests, it does not guard against 521 * concurrent requests dirtying the image. 522 */ 523 static int qcow2_mark_clean(BlockDriverState *bs) 524 { 525 BDRVQcow2State *s = bs->opaque; 526 527 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 528 int ret; 529 530 s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY; 531 532 ret = qcow2_flush_caches(bs); 533 if (ret < 0) { 534 return ret; 535 } 536 537 return qcow2_update_header(bs); 538 } 539 return 0; 540 } 541 542 /* 543 * Marks the image as corrupt. 
544 */ 545 int qcow2_mark_corrupt(BlockDriverState *bs) 546 { 547 BDRVQcow2State *s = bs->opaque; 548 549 s->incompatible_features |= QCOW2_INCOMPAT_CORRUPT; 550 return qcow2_update_header(bs); 551 } 552 553 /* 554 * Marks the image as consistent, i.e., unsets the corrupt bit, and flushes 555 * before if necessary. 556 */ 557 int qcow2_mark_consistent(BlockDriverState *bs) 558 { 559 BDRVQcow2State *s = bs->opaque; 560 561 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) { 562 int ret = qcow2_flush_caches(bs); 563 if (ret < 0) { 564 return ret; 565 } 566 567 s->incompatible_features &= ~QCOW2_INCOMPAT_CORRUPT; 568 return qcow2_update_header(bs); 569 } 570 return 0; 571 } 572 573 static int coroutine_fn qcow2_co_check_locked(BlockDriverState *bs, 574 BdrvCheckResult *result, 575 BdrvCheckMode fix) 576 { 577 int ret = qcow2_check_refcounts(bs, result, fix); 578 if (ret < 0) { 579 return ret; 580 } 581 582 if (fix && result->check_errors == 0 && result->corruptions == 0) { 583 ret = qcow2_mark_clean(bs); 584 if (ret < 0) { 585 return ret; 586 } 587 return qcow2_mark_consistent(bs); 588 } 589 return ret; 590 } 591 592 static int coroutine_fn qcow2_co_check(BlockDriverState *bs, 593 BdrvCheckResult *result, 594 BdrvCheckMode fix) 595 { 596 BDRVQcow2State *s = bs->opaque; 597 int ret; 598 599 qemu_co_mutex_lock(&s->lock); 600 ret = qcow2_co_check_locked(bs, result, fix); 601 qemu_co_mutex_unlock(&s->lock); 602 return ret; 603 } 604 605 int qcow2_validate_table(BlockDriverState *bs, uint64_t offset, 606 uint64_t entries, size_t entry_len, 607 int64_t max_size_bytes, const char *table_name, 608 Error **errp) 609 { 610 BDRVQcow2State *s = bs->opaque; 611 612 if (entries > max_size_bytes / entry_len) { 613 error_setg(errp, "%s too large", table_name); 614 return -EFBIG; 615 } 616 617 /* Use signed INT64_MAX as the maximum even for uint64_t header fields, 618 * because values will be passed to qemu functions taking int64_t. 
*/ 619 if ((INT64_MAX - entries * entry_len < offset) || 620 (offset_into_cluster(s, offset) != 0)) { 621 error_setg(errp, "%s offset invalid", table_name); 622 return -EINVAL; 623 } 624 625 return 0; 626 } 627 628 static const char *const mutable_opts[] = { 629 QCOW2_OPT_LAZY_REFCOUNTS, 630 QCOW2_OPT_DISCARD_REQUEST, 631 QCOW2_OPT_DISCARD_SNAPSHOT, 632 QCOW2_OPT_DISCARD_OTHER, 633 QCOW2_OPT_OVERLAP, 634 QCOW2_OPT_OVERLAP_TEMPLATE, 635 QCOW2_OPT_OVERLAP_MAIN_HEADER, 636 QCOW2_OPT_OVERLAP_ACTIVE_L1, 637 QCOW2_OPT_OVERLAP_ACTIVE_L2, 638 QCOW2_OPT_OVERLAP_REFCOUNT_TABLE, 639 QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK, 640 QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE, 641 QCOW2_OPT_OVERLAP_INACTIVE_L1, 642 QCOW2_OPT_OVERLAP_INACTIVE_L2, 643 QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY, 644 QCOW2_OPT_CACHE_SIZE, 645 QCOW2_OPT_L2_CACHE_SIZE, 646 QCOW2_OPT_L2_CACHE_ENTRY_SIZE, 647 QCOW2_OPT_REFCOUNT_CACHE_SIZE, 648 QCOW2_OPT_CACHE_CLEAN_INTERVAL, 649 NULL 650 }; 651 652 static QemuOptsList qcow2_runtime_opts = { 653 .name = "qcow2", 654 .head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head), 655 .desc = { 656 { 657 .name = QCOW2_OPT_LAZY_REFCOUNTS, 658 .type = QEMU_OPT_BOOL, 659 .help = "Postpone refcount updates", 660 }, 661 { 662 .name = QCOW2_OPT_DISCARD_REQUEST, 663 .type = QEMU_OPT_BOOL, 664 .help = "Pass guest discard requests to the layer below", 665 }, 666 { 667 .name = QCOW2_OPT_DISCARD_SNAPSHOT, 668 .type = QEMU_OPT_BOOL, 669 .help = "Generate discard requests when snapshot related space " 670 "is freed", 671 }, 672 { 673 .name = QCOW2_OPT_DISCARD_OTHER, 674 .type = QEMU_OPT_BOOL, 675 .help = "Generate discard requests when other clusters are freed", 676 }, 677 { 678 .name = QCOW2_OPT_OVERLAP, 679 .type = QEMU_OPT_STRING, 680 .help = "Selects which overlap checks to perform from a range of " 681 "templates (none, constant, cached, all)", 682 }, 683 { 684 .name = QCOW2_OPT_OVERLAP_TEMPLATE, 685 .type = QEMU_OPT_STRING, 686 .help = "Selects which overlap checks to perform from a range of " 687 "templates (none, constant, cached, all)", 688 }, 689 { 690 .name = QCOW2_OPT_OVERLAP_MAIN_HEADER, 691 .type = QEMU_OPT_BOOL, 692 .help = "Check for unintended writes into the main qcow2 header", 693 }, 694 { 695 .name = QCOW2_OPT_OVERLAP_ACTIVE_L1, 696 .type = QEMU_OPT_BOOL, 697 .help = "Check for unintended writes into the active L1 table", 698 }, 699 { 700 .name = QCOW2_OPT_OVERLAP_ACTIVE_L2, 701 .type = QEMU_OPT_BOOL, 702 .help = "Check for unintended writes into an active L2 table", 703 }, 704 { 705 .name = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE, 706 .type = QEMU_OPT_BOOL, 707 .help = "Check for unintended writes into the refcount table", 708 }, 709 { 710 .name = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK, 711 .type = QEMU_OPT_BOOL, 712 .help = "Check for unintended writes into a refcount block", 713 }, 714 { 715 .name = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE, 716 .type = QEMU_OPT_BOOL, 717 .help = "Check for unintended writes into the snapshot table", 718 }, 719 { 720 .name = QCOW2_OPT_OVERLAP_INACTIVE_L1, 721 .type = QEMU_OPT_BOOL, 722 .help = "Check for unintended writes into an inactive L1 table", 723 }, 724 { 725 .name = QCOW2_OPT_OVERLAP_INACTIVE_L2, 726 .type = QEMU_OPT_BOOL, 727 .help = "Check for unintended writes into an inactive L2 table", 728 }, 729 { 730 .name = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY, 731 .type = QEMU_OPT_BOOL, 732 .help = "Check for unintended writes into the bitmap directory", 733 }, 734 { 735 .name = QCOW2_OPT_CACHE_SIZE, 736 .type = QEMU_OPT_SIZE, 737 .help = "Maximum combined metadata (L2 tables and refcount 
blocks) " 738 "cache size", 739 }, 740 { 741 .name = QCOW2_OPT_L2_CACHE_SIZE, 742 .type = QEMU_OPT_SIZE, 743 .help = "Maximum L2 table cache size", 744 }, 745 { 746 .name = QCOW2_OPT_L2_CACHE_ENTRY_SIZE, 747 .type = QEMU_OPT_SIZE, 748 .help = "Size of each entry in the L2 cache", 749 }, 750 { 751 .name = QCOW2_OPT_REFCOUNT_CACHE_SIZE, 752 .type = QEMU_OPT_SIZE, 753 .help = "Maximum refcount block cache size", 754 }, 755 { 756 .name = QCOW2_OPT_CACHE_CLEAN_INTERVAL, 757 .type = QEMU_OPT_NUMBER, 758 .help = "Clean unused cache entries after this time (in seconds)", 759 }, 760 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.", 761 "ID of secret providing qcow2 AES key or LUKS passphrase"), 762 { /* end of list */ } 763 }, 764 }; 765 766 static const char *overlap_bool_option_names[QCOW2_OL_MAX_BITNR] = { 767 [QCOW2_OL_MAIN_HEADER_BITNR] = QCOW2_OPT_OVERLAP_MAIN_HEADER, 768 [QCOW2_OL_ACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L1, 769 [QCOW2_OL_ACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L2, 770 [QCOW2_OL_REFCOUNT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE, 771 [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK, 772 [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE, 773 [QCOW2_OL_INACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L1, 774 [QCOW2_OL_INACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L2, 775 [QCOW2_OL_BITMAP_DIRECTORY_BITNR] = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY, 776 }; 777 778 static void cache_clean_timer_cb(void *opaque) 779 { 780 BlockDriverState *bs = opaque; 781 BDRVQcow2State *s = bs->opaque; 782 qcow2_cache_clean_unused(s->l2_table_cache); 783 qcow2_cache_clean_unused(s->refcount_block_cache); 784 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 785 (int64_t) s->cache_clean_interval * 1000); 786 } 787 788 static void cache_clean_timer_init(BlockDriverState *bs, AioContext *context) 789 { 790 BDRVQcow2State *s = bs->opaque; 791 if (s->cache_clean_interval > 0) { 792 s->cache_clean_timer = aio_timer_new(context, QEMU_CLOCK_VIRTUAL, 793 SCALE_MS, cache_clean_timer_cb, 794 bs); 795 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 796 (int64_t) s->cache_clean_interval * 1000); 797 } 798 } 799 800 static void cache_clean_timer_del(BlockDriverState *bs) 801 { 802 BDRVQcow2State *s = bs->opaque; 803 if (s->cache_clean_timer) { 804 timer_del(s->cache_clean_timer); 805 timer_free(s->cache_clean_timer); 806 s->cache_clean_timer = NULL; 807 } 808 } 809 810 static void qcow2_detach_aio_context(BlockDriverState *bs) 811 { 812 cache_clean_timer_del(bs); 813 } 814 815 static void qcow2_attach_aio_context(BlockDriverState *bs, 816 AioContext *new_context) 817 { 818 cache_clean_timer_init(bs, new_context); 819 } 820 821 static void read_cache_sizes(BlockDriverState *bs, QemuOpts *opts, 822 uint64_t *l2_cache_size, 823 uint64_t *l2_cache_entry_size, 824 uint64_t *refcount_cache_size, Error **errp) 825 { 826 BDRVQcow2State *s = bs->opaque; 827 uint64_t combined_cache_size, l2_cache_max_setting; 828 bool l2_cache_size_set, refcount_cache_size_set, combined_cache_size_set; 829 bool l2_cache_entry_size_set; 830 int min_refcount_cache = MIN_REFCOUNT_CACHE_SIZE * s->cluster_size; 831 uint64_t virtual_disk_size = bs->total_sectors * BDRV_SECTOR_SIZE; 832 uint64_t max_l2_entries = DIV_ROUND_UP(virtual_disk_size, s->cluster_size); 833 /* An L2 table is always one cluster in size so the max cache size 834 * should be a multiple of the cluster size. 
*/ 835 uint64_t max_l2_cache = ROUND_UP(max_l2_entries * sizeof(uint64_t), 836 s->cluster_size); 837 838 combined_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_CACHE_SIZE); 839 l2_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_SIZE); 840 refcount_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 841 l2_cache_entry_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE); 842 843 combined_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_CACHE_SIZE, 0); 844 l2_cache_max_setting = qemu_opt_get_size(opts, QCOW2_OPT_L2_CACHE_SIZE, 845 DEFAULT_L2_CACHE_MAX_SIZE); 846 *refcount_cache_size = qemu_opt_get_size(opts, 847 QCOW2_OPT_REFCOUNT_CACHE_SIZE, 0); 848 849 *l2_cache_entry_size = qemu_opt_get_size( 850 opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE, s->cluster_size); 851 852 *l2_cache_size = MIN(max_l2_cache, l2_cache_max_setting); 853 854 if (combined_cache_size_set) { 855 if (l2_cache_size_set && refcount_cache_size_set) { 856 error_setg(errp, QCOW2_OPT_CACHE_SIZE ", " QCOW2_OPT_L2_CACHE_SIZE 857 " and " QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not be set " 858 "at the same time"); 859 return; 860 } else if (l2_cache_size_set && 861 (l2_cache_max_setting > combined_cache_size)) { 862 error_setg(errp, QCOW2_OPT_L2_CACHE_SIZE " may not exceed " 863 QCOW2_OPT_CACHE_SIZE); 864 return; 865 } else if (*refcount_cache_size > combined_cache_size) { 866 error_setg(errp, QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not exceed " 867 QCOW2_OPT_CACHE_SIZE); 868 return; 869 } 870 871 if (l2_cache_size_set) { 872 *refcount_cache_size = combined_cache_size - *l2_cache_size; 873 } else if (refcount_cache_size_set) { 874 *l2_cache_size = combined_cache_size - *refcount_cache_size; 875 } else { 876 /* Assign as much memory as possible to the L2 cache, and 877 * use the remainder for the refcount cache */ 878 if (combined_cache_size >= max_l2_cache + min_refcount_cache) { 879 *l2_cache_size = max_l2_cache; 880 *refcount_cache_size = combined_cache_size - *l2_cache_size; 881 } else { 882 *refcount_cache_size = 883 MIN(combined_cache_size, min_refcount_cache); 884 *l2_cache_size = combined_cache_size - *refcount_cache_size; 885 } 886 } 887 } 888 889 /* 890 * If the L2 cache is not enough to cover the whole disk then 891 * default to 4KB entries. Smaller entries reduce the cost of 892 * loads and evictions and increase I/O performance. 
893 */ 894 if (*l2_cache_size < max_l2_cache && !l2_cache_entry_size_set) { 895 *l2_cache_entry_size = MIN(s->cluster_size, 4096); 896 } 897 898 /* l2_cache_size and refcount_cache_size are ensured to have at least 899 * their minimum values in qcow2_update_options_prepare() */ 900 901 if (*l2_cache_entry_size < (1 << MIN_CLUSTER_BITS) || 902 *l2_cache_entry_size > s->cluster_size || 903 !is_power_of_2(*l2_cache_entry_size)) { 904 error_setg(errp, "L2 cache entry size must be a power of two " 905 "between %d and the cluster size (%d)", 906 1 << MIN_CLUSTER_BITS, s->cluster_size); 907 return; 908 } 909 } 910 911 typedef struct Qcow2ReopenState { 912 Qcow2Cache *l2_table_cache; 913 Qcow2Cache *refcount_block_cache; 914 int l2_slice_size; /* Number of entries in a slice of the L2 table */ 915 bool use_lazy_refcounts; 916 int overlap_check; 917 bool discard_passthrough[QCOW2_DISCARD_MAX]; 918 uint64_t cache_clean_interval; 919 QCryptoBlockOpenOptions *crypto_opts; /* Disk encryption runtime options */ 920 } Qcow2ReopenState; 921 922 static int qcow2_update_options_prepare(BlockDriverState *bs, 923 Qcow2ReopenState *r, 924 QDict *options, int flags, 925 Error **errp) 926 { 927 BDRVQcow2State *s = bs->opaque; 928 QemuOpts *opts = NULL; 929 const char *opt_overlap_check, *opt_overlap_check_template; 930 int overlap_check_template = 0; 931 uint64_t l2_cache_size, l2_cache_entry_size, refcount_cache_size; 932 int i; 933 const char *encryptfmt; 934 QDict *encryptopts = NULL; 935 Error *local_err = NULL; 936 int ret; 937 938 qdict_extract_subqdict(options, &encryptopts, "encrypt."); 939 encryptfmt = qdict_get_try_str(encryptopts, "format"); 940 941 opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort); 942 qemu_opts_absorb_qdict(opts, options, &local_err); 943 if (local_err) { 944 error_propagate(errp, local_err); 945 ret = -EINVAL; 946 goto fail; 947 } 948 949 /* get L2 table/refcount block cache size from command line options */ 950 read_cache_sizes(bs, opts, &l2_cache_size, &l2_cache_entry_size, 951 &refcount_cache_size, &local_err); 952 if (local_err) { 953 error_propagate(errp, local_err); 954 ret = -EINVAL; 955 goto fail; 956 } 957 958 l2_cache_size /= l2_cache_entry_size; 959 if (l2_cache_size < MIN_L2_CACHE_SIZE) { 960 l2_cache_size = MIN_L2_CACHE_SIZE; 961 } 962 if (l2_cache_size > INT_MAX) { 963 error_setg(errp, "L2 cache size too big"); 964 ret = -EINVAL; 965 goto fail; 966 } 967 968 refcount_cache_size /= s->cluster_size; 969 if (refcount_cache_size < MIN_REFCOUNT_CACHE_SIZE) { 970 refcount_cache_size = MIN_REFCOUNT_CACHE_SIZE; 971 } 972 if (refcount_cache_size > INT_MAX) { 973 error_setg(errp, "Refcount cache size too big"); 974 ret = -EINVAL; 975 goto fail; 976 } 977 978 /* alloc new L2 table/refcount block cache, flush old one */ 979 if (s->l2_table_cache) { 980 ret = qcow2_cache_flush(bs, s->l2_table_cache); 981 if (ret) { 982 error_setg_errno(errp, -ret, "Failed to flush the L2 table cache"); 983 goto fail; 984 } 985 } 986 987 if (s->refcount_block_cache) { 988 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 989 if (ret) { 990 error_setg_errno(errp, -ret, 991 "Failed to flush the refcount block cache"); 992 goto fail; 993 } 994 } 995 996 r->l2_slice_size = l2_cache_entry_size / sizeof(uint64_t); 997 r->l2_table_cache = qcow2_cache_create(bs, l2_cache_size, 998 l2_cache_entry_size); 999 r->refcount_block_cache = qcow2_cache_create(bs, refcount_cache_size, 1000 s->cluster_size); 1001 if (r->l2_table_cache == NULL || r->refcount_block_cache == NULL) { 1002 
error_setg(errp, "Could not allocate metadata caches"); 1003 ret = -ENOMEM; 1004 goto fail; 1005 } 1006 1007 /* New interval for cache cleanup timer */ 1008 r->cache_clean_interval = 1009 qemu_opt_get_number(opts, QCOW2_OPT_CACHE_CLEAN_INTERVAL, 1010 DEFAULT_CACHE_CLEAN_INTERVAL); 1011 #ifndef CONFIG_LINUX 1012 if (r->cache_clean_interval != 0) { 1013 error_setg(errp, QCOW2_OPT_CACHE_CLEAN_INTERVAL 1014 " not supported on this host"); 1015 ret = -EINVAL; 1016 goto fail; 1017 } 1018 #endif 1019 if (r->cache_clean_interval > UINT_MAX) { 1020 error_setg(errp, "Cache clean interval too big"); 1021 ret = -EINVAL; 1022 goto fail; 1023 } 1024 1025 /* lazy-refcounts; flush if going from enabled to disabled */ 1026 r->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS, 1027 (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS)); 1028 if (r->use_lazy_refcounts && s->qcow_version < 3) { 1029 error_setg(errp, "Lazy refcounts require a qcow2 image with at least " 1030 "qemu 1.1 compatibility level"); 1031 ret = -EINVAL; 1032 goto fail; 1033 } 1034 1035 if (s->use_lazy_refcounts && !r->use_lazy_refcounts) { 1036 ret = qcow2_mark_clean(bs); 1037 if (ret < 0) { 1038 error_setg_errno(errp, -ret, "Failed to disable lazy refcounts"); 1039 goto fail; 1040 } 1041 } 1042 1043 /* Overlap check options */ 1044 opt_overlap_check = qemu_opt_get(opts, QCOW2_OPT_OVERLAP); 1045 opt_overlap_check_template = qemu_opt_get(opts, QCOW2_OPT_OVERLAP_TEMPLATE); 1046 if (opt_overlap_check_template && opt_overlap_check && 1047 strcmp(opt_overlap_check_template, opt_overlap_check)) 1048 { 1049 error_setg(errp, "Conflicting values for qcow2 options '" 1050 QCOW2_OPT_OVERLAP "' ('%s') and '" QCOW2_OPT_OVERLAP_TEMPLATE 1051 "' ('%s')", opt_overlap_check, opt_overlap_check_template); 1052 ret = -EINVAL; 1053 goto fail; 1054 } 1055 if (!opt_overlap_check) { 1056 opt_overlap_check = opt_overlap_check_template ?: "cached"; 1057 } 1058 1059 if (!strcmp(opt_overlap_check, "none")) { 1060 overlap_check_template = 0; 1061 } else if (!strcmp(opt_overlap_check, "constant")) { 1062 overlap_check_template = QCOW2_OL_CONSTANT; 1063 } else if (!strcmp(opt_overlap_check, "cached")) { 1064 overlap_check_template = QCOW2_OL_CACHED; 1065 } else if (!strcmp(opt_overlap_check, "all")) { 1066 overlap_check_template = QCOW2_OL_ALL; 1067 } else { 1068 error_setg(errp, "Unsupported value '%s' for qcow2 option " 1069 "'overlap-check'. 
Allowed are any of the following: " 1070 "none, constant, cached, all", opt_overlap_check); 1071 ret = -EINVAL; 1072 goto fail; 1073 } 1074 1075 r->overlap_check = 0; 1076 for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) { 1077 /* overlap-check defines a template bitmask, but every flag may be 1078 * overwritten through the associated boolean option */ 1079 r->overlap_check |= 1080 qemu_opt_get_bool(opts, overlap_bool_option_names[i], 1081 overlap_check_template & (1 << i)) << i; 1082 } 1083 1084 r->discard_passthrough[QCOW2_DISCARD_NEVER] = false; 1085 r->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true; 1086 r->discard_passthrough[QCOW2_DISCARD_REQUEST] = 1087 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST, 1088 flags & BDRV_O_UNMAP); 1089 r->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] = 1090 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true); 1091 r->discard_passthrough[QCOW2_DISCARD_OTHER] = 1092 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false); 1093 1094 switch (s->crypt_method_header) { 1095 case QCOW_CRYPT_NONE: 1096 if (encryptfmt) { 1097 error_setg(errp, "No encryption in image header, but options " 1098 "specified format '%s'", encryptfmt); 1099 ret = -EINVAL; 1100 goto fail; 1101 } 1102 break; 1103 1104 case QCOW_CRYPT_AES: 1105 if (encryptfmt && !g_str_equal(encryptfmt, "aes")) { 1106 error_setg(errp, 1107 "Header reported 'aes' encryption format but " 1108 "options specify '%s'", encryptfmt); 1109 ret = -EINVAL; 1110 goto fail; 1111 } 1112 qdict_put_str(encryptopts, "format", "qcow"); 1113 r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp); 1114 break; 1115 1116 case QCOW_CRYPT_LUKS: 1117 if (encryptfmt && !g_str_equal(encryptfmt, "luks")) { 1118 error_setg(errp, 1119 "Header reported 'luks' encryption format but " 1120 "options specify '%s'", encryptfmt); 1121 ret = -EINVAL; 1122 goto fail; 1123 } 1124 qdict_put_str(encryptopts, "format", "luks"); 1125 r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp); 1126 break; 1127 1128 default: 1129 error_setg(errp, "Unsupported encryption method %d", 1130 s->crypt_method_header); 1131 break; 1132 } 1133 if (s->crypt_method_header != QCOW_CRYPT_NONE && !r->crypto_opts) { 1134 ret = -EINVAL; 1135 goto fail; 1136 } 1137 1138 ret = 0; 1139 fail: 1140 qobject_unref(encryptopts); 1141 qemu_opts_del(opts); 1142 opts = NULL; 1143 return ret; 1144 } 1145 1146 static void qcow2_update_options_commit(BlockDriverState *bs, 1147 Qcow2ReopenState *r) 1148 { 1149 BDRVQcow2State *s = bs->opaque; 1150 int i; 1151 1152 if (s->l2_table_cache) { 1153 qcow2_cache_destroy(s->l2_table_cache); 1154 } 1155 if (s->refcount_block_cache) { 1156 qcow2_cache_destroy(s->refcount_block_cache); 1157 } 1158 s->l2_table_cache = r->l2_table_cache; 1159 s->refcount_block_cache = r->refcount_block_cache; 1160 s->l2_slice_size = r->l2_slice_size; 1161 1162 s->overlap_check = r->overlap_check; 1163 s->use_lazy_refcounts = r->use_lazy_refcounts; 1164 1165 for (i = 0; i < QCOW2_DISCARD_MAX; i++) { 1166 s->discard_passthrough[i] = r->discard_passthrough[i]; 1167 } 1168 1169 if (s->cache_clean_interval != r->cache_clean_interval) { 1170 cache_clean_timer_del(bs); 1171 s->cache_clean_interval = r->cache_clean_interval; 1172 cache_clean_timer_init(bs, bdrv_get_aio_context(bs)); 1173 } 1174 1175 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 1176 s->crypto_opts = r->crypto_opts; 1177 } 1178 1179 static void qcow2_update_options_abort(BlockDriverState *bs, 1180 Qcow2ReopenState *r) 1181 { 1182 if (r->l2_table_cache) { 1183 
qcow2_cache_destroy(r->l2_table_cache); 1184 } 1185 if (r->refcount_block_cache) { 1186 qcow2_cache_destroy(r->refcount_block_cache); 1187 } 1188 qapi_free_QCryptoBlockOpenOptions(r->crypto_opts); 1189 } 1190 1191 static int qcow2_update_options(BlockDriverState *bs, QDict *options, 1192 int flags, Error **errp) 1193 { 1194 Qcow2ReopenState r = {}; 1195 int ret; 1196 1197 ret = qcow2_update_options_prepare(bs, &r, options, flags, errp); 1198 if (ret >= 0) { 1199 qcow2_update_options_commit(bs, &r); 1200 } else { 1201 qcow2_update_options_abort(bs, &r); 1202 } 1203 1204 return ret; 1205 } 1206 1207 /* Called with s->lock held. */ 1208 static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options, 1209 int flags, Error **errp) 1210 { 1211 BDRVQcow2State *s = bs->opaque; 1212 unsigned int len, i; 1213 int ret = 0; 1214 QCowHeader header; 1215 Error *local_err = NULL; 1216 uint64_t ext_end; 1217 uint64_t l1_vm_state_index; 1218 bool update_header = false; 1219 1220 ret = bdrv_pread(bs->file, 0, &header, sizeof(header)); 1221 if (ret < 0) { 1222 error_setg_errno(errp, -ret, "Could not read qcow2 header"); 1223 goto fail; 1224 } 1225 header.magic = be32_to_cpu(header.magic); 1226 header.version = be32_to_cpu(header.version); 1227 header.backing_file_offset = be64_to_cpu(header.backing_file_offset); 1228 header.backing_file_size = be32_to_cpu(header.backing_file_size); 1229 header.size = be64_to_cpu(header.size); 1230 header.cluster_bits = be32_to_cpu(header.cluster_bits); 1231 header.crypt_method = be32_to_cpu(header.crypt_method); 1232 header.l1_table_offset = be64_to_cpu(header.l1_table_offset); 1233 header.l1_size = be32_to_cpu(header.l1_size); 1234 header.refcount_table_offset = be64_to_cpu(header.refcount_table_offset); 1235 header.refcount_table_clusters = 1236 be32_to_cpu(header.refcount_table_clusters); 1237 header.snapshots_offset = be64_to_cpu(header.snapshots_offset); 1238 header.nb_snapshots = be32_to_cpu(header.nb_snapshots); 1239 1240 if (header.magic != QCOW_MAGIC) { 1241 error_setg(errp, "Image is not in qcow2 format"); 1242 ret = -EINVAL; 1243 goto fail; 1244 } 1245 if (header.version < 2 || header.version > 3) { 1246 error_setg(errp, "Unsupported qcow2 version %" PRIu32, header.version); 1247 ret = -ENOTSUP; 1248 goto fail; 1249 } 1250 1251 s->qcow_version = header.version; 1252 1253 /* Initialise cluster size */ 1254 if (header.cluster_bits < MIN_CLUSTER_BITS || 1255 header.cluster_bits > MAX_CLUSTER_BITS) { 1256 error_setg(errp, "Unsupported cluster size: 2^%" PRIu32, 1257 header.cluster_bits); 1258 ret = -EINVAL; 1259 goto fail; 1260 } 1261 1262 s->cluster_bits = header.cluster_bits; 1263 s->cluster_size = 1 << s->cluster_bits; 1264 1265 /* Initialise version 3 header fields */ 1266 if (header.version == 2) { 1267 header.incompatible_features = 0; 1268 header.compatible_features = 0; 1269 header.autoclear_features = 0; 1270 header.refcount_order = 4; 1271 header.header_length = 72; 1272 } else { 1273 header.incompatible_features = 1274 be64_to_cpu(header.incompatible_features); 1275 header.compatible_features = be64_to_cpu(header.compatible_features); 1276 header.autoclear_features = be64_to_cpu(header.autoclear_features); 1277 header.refcount_order = be32_to_cpu(header.refcount_order); 1278 header.header_length = be32_to_cpu(header.header_length); 1279 1280 if (header.header_length < 104) { 1281 error_setg(errp, "qcow2 header too short"); 1282 ret = -EINVAL; 1283 goto fail; 1284 } 1285 } 1286 1287 if (header.header_length > s->cluster_size) { 1288 
error_setg(errp, "qcow2 header exceeds cluster size"); 1289 ret = -EINVAL; 1290 goto fail; 1291 } 1292 1293 if (header.header_length > sizeof(header)) { 1294 s->unknown_header_fields_size = header.header_length - sizeof(header); 1295 s->unknown_header_fields = g_malloc(s->unknown_header_fields_size); 1296 ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields, 1297 s->unknown_header_fields_size); 1298 if (ret < 0) { 1299 error_setg_errno(errp, -ret, "Could not read unknown qcow2 header " 1300 "fields"); 1301 goto fail; 1302 } 1303 } 1304 1305 if (header.backing_file_offset > s->cluster_size) { 1306 error_setg(errp, "Invalid backing file offset"); 1307 ret = -EINVAL; 1308 goto fail; 1309 } 1310 1311 if (header.backing_file_offset) { 1312 ext_end = header.backing_file_offset; 1313 } else { 1314 ext_end = 1 << header.cluster_bits; 1315 } 1316 1317 /* Handle feature bits */ 1318 s->incompatible_features = header.incompatible_features; 1319 s->compatible_features = header.compatible_features; 1320 s->autoclear_features = header.autoclear_features; 1321 1322 if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) { 1323 void *feature_table = NULL; 1324 qcow2_read_extensions(bs, header.header_length, ext_end, 1325 &feature_table, flags, NULL, NULL); 1326 report_unsupported_feature(errp, feature_table, 1327 s->incompatible_features & 1328 ~QCOW2_INCOMPAT_MASK); 1329 ret = -ENOTSUP; 1330 g_free(feature_table); 1331 goto fail; 1332 } 1333 1334 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) { 1335 /* Corrupt images may not be written to unless they are being repaired 1336 */ 1337 if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) { 1338 error_setg(errp, "qcow2: Image is corrupt; cannot be opened " 1339 "read/write"); 1340 ret = -EACCES; 1341 goto fail; 1342 } 1343 } 1344 1345 /* Check support for various header values */ 1346 if (header.refcount_order > 6) { 1347 error_setg(errp, "Reference count entry width too large; may not " 1348 "exceed 64 bits"); 1349 ret = -EINVAL; 1350 goto fail; 1351 } 1352 s->refcount_order = header.refcount_order; 1353 s->refcount_bits = 1 << s->refcount_order; 1354 s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1); 1355 s->refcount_max += s->refcount_max - 1; 1356 1357 s->crypt_method_header = header.crypt_method; 1358 if (s->crypt_method_header) { 1359 if (bdrv_uses_whitelist() && 1360 s->crypt_method_header == QCOW_CRYPT_AES) { 1361 error_setg(errp, 1362 "Use of AES-CBC encrypted qcow2 images is no longer " 1363 "supported in system emulators"); 1364 error_append_hint(errp, 1365 "You can use 'qemu-img convert' to convert your " 1366 "image to an alternative supported format, such " 1367 "as unencrypted qcow2, or raw with the LUKS " 1368 "format instead.\n"); 1369 ret = -ENOSYS; 1370 goto fail; 1371 } 1372 1373 if (s->crypt_method_header == QCOW_CRYPT_AES) { 1374 s->crypt_physical_offset = false; 1375 } else { 1376 /* Assuming LUKS and any future crypt methods we 1377 * add will all use physical offsets, due to the 1378 * fact that the alternative is insecure... 
*/ 1379 s->crypt_physical_offset = true; 1380 } 1381 1382 bs->encrypted = true; 1383 } 1384 1385 s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */ 1386 s->l2_size = 1 << s->l2_bits; 1387 /* 2^(s->refcount_order - 3) is the refcount width in bytes */ 1388 s->refcount_block_bits = s->cluster_bits - (s->refcount_order - 3); 1389 s->refcount_block_size = 1 << s->refcount_block_bits; 1390 bs->total_sectors = header.size / BDRV_SECTOR_SIZE; 1391 s->csize_shift = (62 - (s->cluster_bits - 8)); 1392 s->csize_mask = (1 << (s->cluster_bits - 8)) - 1; 1393 s->cluster_offset_mask = (1LL << s->csize_shift) - 1; 1394 1395 s->refcount_table_offset = header.refcount_table_offset; 1396 s->refcount_table_size = 1397 header.refcount_table_clusters << (s->cluster_bits - 3); 1398 1399 if (header.refcount_table_clusters == 0 && !(flags & BDRV_O_CHECK)) { 1400 error_setg(errp, "Image does not contain a reference count table"); 1401 ret = -EINVAL; 1402 goto fail; 1403 } 1404 1405 ret = qcow2_validate_table(bs, s->refcount_table_offset, 1406 header.refcount_table_clusters, 1407 s->cluster_size, QCOW_MAX_REFTABLE_SIZE, 1408 "Reference count table", errp); 1409 if (ret < 0) { 1410 goto fail; 1411 } 1412 1413 /* The total size in bytes of the snapshot table is checked in 1414 * qcow2_read_snapshots() because the size of each snapshot is 1415 * variable and we don't know it yet. 1416 * Here we only check the offset and number of snapshots. */ 1417 ret = qcow2_validate_table(bs, header.snapshots_offset, 1418 header.nb_snapshots, 1419 sizeof(QCowSnapshotHeader), 1420 sizeof(QCowSnapshotHeader) * QCOW_MAX_SNAPSHOTS, 1421 "Snapshot table", errp); 1422 if (ret < 0) { 1423 goto fail; 1424 } 1425 1426 /* read the level 1 table */ 1427 ret = qcow2_validate_table(bs, header.l1_table_offset, 1428 header.l1_size, sizeof(uint64_t), 1429 QCOW_MAX_L1_SIZE, "Active L1 table", errp); 1430 if (ret < 0) { 1431 goto fail; 1432 } 1433 s->l1_size = header.l1_size; 1434 s->l1_table_offset = header.l1_table_offset; 1435 1436 l1_vm_state_index = size_to_l1(s, header.size); 1437 if (l1_vm_state_index > INT_MAX) { 1438 error_setg(errp, "Image is too big"); 1439 ret = -EFBIG; 1440 goto fail; 1441 } 1442 s->l1_vm_state_index = l1_vm_state_index; 1443 1444 /* the L1 table must contain at least enough entries to put 1445 header.size bytes */ 1446 if (s->l1_size < s->l1_vm_state_index) { 1447 error_setg(errp, "L1 table is too small"); 1448 ret = -EINVAL; 1449 goto fail; 1450 } 1451 1452 if (s->l1_size > 0) { 1453 s->l1_table = qemu_try_blockalign(bs->file->bs, 1454 ROUND_UP(s->l1_size * sizeof(uint64_t), 512)); 1455 if (s->l1_table == NULL) { 1456 error_setg(errp, "Could not allocate L1 table"); 1457 ret = -ENOMEM; 1458 goto fail; 1459 } 1460 ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table, 1461 s->l1_size * sizeof(uint64_t)); 1462 if (ret < 0) { 1463 error_setg_errno(errp, -ret, "Could not read L1 table"); 1464 goto fail; 1465 } 1466 for(i = 0;i < s->l1_size; i++) { 1467 s->l1_table[i] = be64_to_cpu(s->l1_table[i]); 1468 } 1469 } 1470 1471 /* Parse driver-specific options */ 1472 ret = qcow2_update_options(bs, options, flags, errp); 1473 if (ret < 0) { 1474 goto fail; 1475 } 1476 1477 s->flags = flags; 1478 1479 ret = qcow2_refcount_init(bs); 1480 if (ret != 0) { 1481 error_setg_errno(errp, -ret, "Could not initialize refcount handling"); 1482 goto fail; 1483 } 1484 1485 QLIST_INIT(&s->cluster_allocs); 1486 QTAILQ_INIT(&s->discards); 1487 1488 /* read qcow2 extensions */ 1489 if (qcow2_read_extensions(bs, 
                              header.header_length, ext_end, NULL,
                              flags, &update_header, &local_err)) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    /* Open external data file */
    s->data_file = bdrv_open_child(NULL, options, "data-file", bs, &child_file,
                                   true, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    if (s->incompatible_features & QCOW2_INCOMPAT_DATA_FILE) {
        if (!s->data_file && s->image_data_file) {
            s->data_file = bdrv_open_child(s->image_data_file, options,
                                           "data-file", bs, &child_file,
                                           false, errp);
            if (!s->data_file) {
                ret = -EINVAL;
                goto fail;
            }
        }
        if (!s->data_file) {
            error_setg(errp, "'data-file' is required for this image");
            ret = -EINVAL;
            goto fail;
        }
    } else {
        if (s->data_file) {
            error_setg(errp, "'data-file' can only be set for images with an "
                       "external data file");
            ret = -EINVAL;
            goto fail;
        }

        s->data_file = bs->file;

        if (data_file_is_raw(bs)) {
            error_setg(errp, "data-file-raw requires a data file");
            ret = -EINVAL;
            goto fail;
        }
    }

    /* qcow2_read_extensions may have set up the crypto context
     * if the crypt method needs a header region; some methods
     * don't need header extensions, so we must check here.
     */
    if (s->crypt_method_header && !s->crypto) {
        if (s->crypt_method_header == QCOW_CRYPT_AES) {
            unsigned int cflags = 0;
            if (flags & BDRV_O_NO_IO) {
                cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
            }
            s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
                                           NULL, NULL, cflags,
                                           QCOW2_MAX_THREADS, errp);
            if (!s->crypto) {
                ret = -EINVAL;
                goto fail;
            }
        } else if (!(flags & BDRV_O_NO_IO)) {
            error_setg(errp, "Missing CRYPTO header for crypt method %d",
                       s->crypt_method_header);
            ret = -EINVAL;
            goto fail;
        }
    }

    /* read the backing file name */
    if (header.backing_file_offset != 0) {
        len = header.backing_file_size;
        if (len > MIN(1023, s->cluster_size - header.backing_file_offset) ||
            len >= sizeof(bs->backing_file)) {
            error_setg(errp, "Backing file name too long");
            ret = -EINVAL;
            goto fail;
        }
        ret = bdrv_pread(bs->file, header.backing_file_offset,
                         bs->auto_backing_file, len);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not read backing file name");
            goto fail;
        }
        bs->auto_backing_file[len] = '\0';
        pstrcpy(bs->backing_file, sizeof(bs->backing_file),
                bs->auto_backing_file);
        s->image_backing_file = g_strdup(bs->auto_backing_file);
    }

    /* Internal snapshots */
    s->snapshots_offset = header.snapshots_offset;
    s->nb_snapshots = header.nb_snapshots;

    ret = qcow2_read_snapshots(bs);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read snapshots");
        goto fail;
    }

    /* Clear unknown autoclear feature bits */
    update_header |= s->autoclear_features & ~QCOW2_AUTOCLEAR_MASK;
    update_header =
        update_header && !bs->read_only && !(flags & BDRV_O_INACTIVE);
    if (update_header) {
        s->autoclear_features &= QCOW2_AUTOCLEAR_MASK;
    }

    /* == Handle persistent dirty bitmaps ==
     *
     * We want to load dirty bitmaps in three cases:
     *
     * 1. Normal open of the disk in active mode, not related to invalidation
     *    after migration.
     *
     * 2. Invalidation of the target vm after the pre-copy phase of migration,
     *    if bitmaps are _not_ migrating through the migration channel, i.e.
     *    the 'dirty-bitmaps' capability is disabled.
     *
     * 3. Invalidation of the source vm after a failed or canceled migration.
     *    This is a very interesting case. There are two possible types of
     *    bitmaps:
     *
     *    A. Stored on inactivation and removed. They should be loaded from the
     *       image.
     *
     *    B. Not stored: non-persistent bitmaps, and bitmaps migrated through
     *       the migration channel (with the dirty-bitmaps capability).
     *
     *    On the other hand, there are two possible sub-cases:
     *
     *    3.1 disk was changed by somebody else while we were inactive. In this
     *        case all in-RAM dirty bitmaps (both persistent and not) are
     *        definitely invalid. And we don't have any method to determine
     *        this.
     *
     *        The simple and safe thing is to just drop all the bitmaps of type
     *        B on inactivation. But in this case we lose bitmaps in the valid
     *        3.2 case.
     *
     *        On the other hand, resuming the source vm when the disk has
     *        already been changed is a bad thing anyway: not only the bitmaps,
     *        the whole vm state is out of sync with the disk.
     *
     *        This means that a user or management tool who for some reason
     *        decided to resume the source vm, after the disk was already
     *        changed by the target vm, should at least drop all dirty bitmaps
     *        by hand.
     *
     *        So, we can ignore this case for now, but TODO: a "generation"
     *        extension for qcow2, to determine that the image was changed
     *        after the last inactivation. And if it was changed, we will drop
     *        (or at least mark as 'invalid') all the bitmaps of type B, both
     *        persistent and not.
     *
     *    3.2 disk was _not_ changed while we were inactive. Bitmaps may be
     *        saved to disk ('dirty-bitmaps' capability disabled), or not saved
     *        ('dirty-bitmaps' capability enabled), but we don't need to care:
     *        let's load bitmaps as always: stored bitmaps will be loaded, and
     *        bitmaps that were not stored have the IN_USE=1 flag in the image
     *        and will be skipped on loading.
     *
     * One remaining possible case when we don't want to load bitmaps:
     *
     * 4. Open disk in inactive mode in target vm (bitmaps are migrating or
     *    will be loaded on invalidation, no need to try loading them before)
     */

    if (!(bdrv_get_flags(bs) & BDRV_O_INACTIVE)) {
        /* It's case 1, 2 or 3.2. Or 3.1, which is a BUG in the management
         * layer. */
        bool header_updated = qcow2_load_dirty_bitmaps(bs, &local_err);

        update_header = update_header && !header_updated;
    }
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    if (update_header) {
        ret = qcow2_update_header(bs);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Could not update qcow2 header");
            goto fail;
        }
    }

    bs->supported_zero_flags = header.version >= 3 ?
BDRV_REQ_MAY_UNMAP : 0; 1680 1681 /* Repair image if dirty */ 1682 if (!(flags & (BDRV_O_CHECK | BDRV_O_INACTIVE)) && !bs->read_only && 1683 (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) { 1684 BdrvCheckResult result = {0}; 1685 1686 ret = qcow2_co_check_locked(bs, &result, 1687 BDRV_FIX_ERRORS | BDRV_FIX_LEAKS); 1688 if (ret < 0 || result.check_errors) { 1689 if (ret >= 0) { 1690 ret = -EIO; 1691 } 1692 error_setg_errno(errp, -ret, "Could not repair dirty image"); 1693 goto fail; 1694 } 1695 } 1696 1697 #ifdef DEBUG_ALLOC 1698 { 1699 BdrvCheckResult result = {0}; 1700 qcow2_check_refcounts(bs, &result, 0); 1701 } 1702 #endif 1703 1704 qemu_co_queue_init(&s->thread_task_queue); 1705 1706 return ret; 1707 1708 fail: 1709 g_free(s->image_data_file); 1710 if (has_data_file(bs)) { 1711 bdrv_unref_child(bs, s->data_file); 1712 } 1713 g_free(s->unknown_header_fields); 1714 cleanup_unknown_header_ext(bs); 1715 qcow2_free_snapshots(bs); 1716 qcow2_refcount_close(bs); 1717 qemu_vfree(s->l1_table); 1718 /* else pre-write overlap checks in cache_destroy may crash */ 1719 s->l1_table = NULL; 1720 cache_clean_timer_del(bs); 1721 if (s->l2_table_cache) { 1722 qcow2_cache_destroy(s->l2_table_cache); 1723 } 1724 if (s->refcount_block_cache) { 1725 qcow2_cache_destroy(s->refcount_block_cache); 1726 } 1727 qcrypto_block_free(s->crypto); 1728 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 1729 return ret; 1730 } 1731 1732 typedef struct QCow2OpenCo { 1733 BlockDriverState *bs; 1734 QDict *options; 1735 int flags; 1736 Error **errp; 1737 int ret; 1738 } QCow2OpenCo; 1739 1740 static void coroutine_fn qcow2_open_entry(void *opaque) 1741 { 1742 QCow2OpenCo *qoc = opaque; 1743 BDRVQcow2State *s = qoc->bs->opaque; 1744 1745 qemu_co_mutex_lock(&s->lock); 1746 qoc->ret = qcow2_do_open(qoc->bs, qoc->options, qoc->flags, qoc->errp); 1747 qemu_co_mutex_unlock(&s->lock); 1748 } 1749 1750 static int qcow2_open(BlockDriverState *bs, QDict *options, int flags, 1751 Error **errp) 1752 { 1753 BDRVQcow2State *s = bs->opaque; 1754 QCow2OpenCo qoc = { 1755 .bs = bs, 1756 .options = options, 1757 .flags = flags, 1758 .errp = errp, 1759 .ret = -EINPROGRESS 1760 }; 1761 1762 bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file, 1763 false, errp); 1764 if (!bs->file) { 1765 return -EINVAL; 1766 } 1767 1768 /* Initialise locks */ 1769 qemu_co_mutex_init(&s->lock); 1770 1771 if (qemu_in_coroutine()) { 1772 /* From bdrv_co_create. 
         */
        qcow2_open_entry(&qoc);
    } else {
        assert(qemu_get_current_aio_context() == qemu_get_aio_context());
        qemu_coroutine_enter(qemu_coroutine_create(qcow2_open_entry, &qoc));
        BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);
    }
    return qoc.ret;
}

static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQcow2State *s = bs->opaque;

    if (bs->encrypted) {
        /* Encryption works on a sector granularity */
        bs->bl.request_alignment = qcrypto_block_get_sector_size(s->crypto);
    }
    bs->bl.pwrite_zeroes_alignment = s->cluster_size;
    bs->bl.pdiscard_alignment = s->cluster_size;
}

static int qcow2_reopen_prepare(BDRVReopenState *state,
                                BlockReopenQueue *queue, Error **errp)
{
    Qcow2ReopenState *r;
    int ret;

    r = g_new0(Qcow2ReopenState, 1);
    state->opaque = r;

    ret = qcow2_update_options_prepare(state->bs, r, state->options,
                                       state->flags, errp);
    if (ret < 0) {
        goto fail;
    }

    /* We need to write out any unwritten data if we reopen read-only. */
    if ((state->flags & BDRV_O_RDWR) == 0) {
        ret = qcow2_reopen_bitmaps_ro(state->bs, errp);
        if (ret < 0) {
            goto fail;
        }

        ret = bdrv_flush(state->bs);
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_mark_clean(state->bs);
        if (ret < 0) {
            goto fail;
        }
    }

    return 0;

fail:
    qcow2_update_options_abort(state->bs, r);
    g_free(r);
    return ret;
}

static void qcow2_reopen_commit(BDRVReopenState *state)
{
    qcow2_update_options_commit(state->bs, state->opaque);
    if (state->flags & BDRV_O_RDWR) {
        Error *local_err = NULL;

        if (qcow2_reopen_bitmaps_rw(state->bs, &local_err) < 0) {
            /*
             * This is not fatal: the bitmaps are just left read-only, so all
             * following writes will fail. The user can remove the read-only
             * bitmaps to unblock writes, or retry the reopen.
1846 */ 1847 error_reportf_err(local_err, 1848 "%s: Failed to make dirty bitmaps writable: ", 1849 bdrv_get_node_name(state->bs)); 1850 } 1851 } 1852 g_free(state->opaque); 1853 } 1854 1855 static void qcow2_reopen_abort(BDRVReopenState *state) 1856 { 1857 qcow2_update_options_abort(state->bs, state->opaque); 1858 g_free(state->opaque); 1859 } 1860 1861 static void qcow2_join_options(QDict *options, QDict *old_options) 1862 { 1863 bool has_new_overlap_template = 1864 qdict_haskey(options, QCOW2_OPT_OVERLAP) || 1865 qdict_haskey(options, QCOW2_OPT_OVERLAP_TEMPLATE); 1866 bool has_new_total_cache_size = 1867 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE); 1868 bool has_all_cache_options; 1869 1870 /* New overlap template overrides all old overlap options */ 1871 if (has_new_overlap_template) { 1872 qdict_del(old_options, QCOW2_OPT_OVERLAP); 1873 qdict_del(old_options, QCOW2_OPT_OVERLAP_TEMPLATE); 1874 qdict_del(old_options, QCOW2_OPT_OVERLAP_MAIN_HEADER); 1875 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L1); 1876 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L2); 1877 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_TABLE); 1878 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK); 1879 qdict_del(old_options, QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE); 1880 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L1); 1881 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L2); 1882 } 1883 1884 /* New total cache size overrides all old options */ 1885 if (qdict_haskey(options, QCOW2_OPT_CACHE_SIZE)) { 1886 qdict_del(old_options, QCOW2_OPT_L2_CACHE_SIZE); 1887 qdict_del(old_options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 1888 } 1889 1890 qdict_join(options, old_options, false); 1891 1892 /* 1893 * If after merging all cache size options are set, an old total size is 1894 * overwritten. Do keep all options, however, if all three are new. The 1895 * resulting error message is what we want to happen. 
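     * For example (illustrative): if the old options carried a total
     * cache-size while the new options only set l2-cache-size and
     * refcount-cache-size, all three keys are present after the merge, so
     * the stale total is dropped and the new explicit sizes win. If all
     * three values come from the new options they are kept, and qcow2's
     * usual complaint about specifying all three at once is reported.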
1896 */ 1897 has_all_cache_options = 1898 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE) || 1899 qdict_haskey(options, QCOW2_OPT_L2_CACHE_SIZE) || 1900 qdict_haskey(options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 1901 1902 if (has_all_cache_options && !has_new_total_cache_size) { 1903 qdict_del(options, QCOW2_OPT_CACHE_SIZE); 1904 } 1905 } 1906 1907 static int coroutine_fn qcow2_co_block_status(BlockDriverState *bs, 1908 bool want_zero, 1909 int64_t offset, int64_t count, 1910 int64_t *pnum, int64_t *map, 1911 BlockDriverState **file) 1912 { 1913 BDRVQcow2State *s = bs->opaque; 1914 uint64_t cluster_offset; 1915 int index_in_cluster, ret; 1916 unsigned int bytes; 1917 int status = 0; 1918 1919 if (!s->metadata_preallocation_checked) { 1920 ret = qcow2_detect_metadata_preallocation(bs); 1921 s->metadata_preallocation = (ret == 1); 1922 s->metadata_preallocation_checked = true; 1923 } 1924 1925 bytes = MIN(INT_MAX, count); 1926 qemu_co_mutex_lock(&s->lock); 1927 ret = qcow2_get_cluster_offset(bs, offset, &bytes, &cluster_offset); 1928 qemu_co_mutex_unlock(&s->lock); 1929 if (ret < 0) { 1930 return ret; 1931 } 1932 1933 *pnum = bytes; 1934 1935 if ((ret == QCOW2_CLUSTER_NORMAL || ret == QCOW2_CLUSTER_ZERO_ALLOC) && 1936 !s->crypto) { 1937 index_in_cluster = offset & (s->cluster_size - 1); 1938 *map = cluster_offset | index_in_cluster; 1939 *file = s->data_file->bs; 1940 status |= BDRV_BLOCK_OFFSET_VALID; 1941 } 1942 if (ret == QCOW2_CLUSTER_ZERO_PLAIN || ret == QCOW2_CLUSTER_ZERO_ALLOC) { 1943 status |= BDRV_BLOCK_ZERO; 1944 } else if (ret != QCOW2_CLUSTER_UNALLOCATED) { 1945 status |= BDRV_BLOCK_DATA; 1946 } 1947 if (s->metadata_preallocation && (status & BDRV_BLOCK_DATA) && 1948 (status & BDRV_BLOCK_OFFSET_VALID)) 1949 { 1950 status |= BDRV_BLOCK_RECURSE; 1951 } 1952 return status; 1953 } 1954 1955 static coroutine_fn int qcow2_handle_l2meta(BlockDriverState *bs, 1956 QCowL2Meta **pl2meta, 1957 bool link_l2) 1958 { 1959 int ret = 0; 1960 QCowL2Meta *l2meta = *pl2meta; 1961 1962 while (l2meta != NULL) { 1963 QCowL2Meta *next; 1964 1965 if (link_l2) { 1966 ret = qcow2_alloc_cluster_link_l2(bs, l2meta); 1967 if (ret) { 1968 goto out; 1969 } 1970 } else { 1971 qcow2_alloc_cluster_abort(bs, l2meta); 1972 } 1973 1974 /* Take the request off the list of running requests */ 1975 if (l2meta->nb_clusters != 0) { 1976 QLIST_REMOVE(l2meta, next_in_flight); 1977 } 1978 1979 qemu_co_queue_restart_all(&l2meta->dependent_requests); 1980 1981 next = l2meta->next; 1982 g_free(l2meta); 1983 l2meta = next; 1984 } 1985 out: 1986 *pl2meta = l2meta; 1987 return ret; 1988 } 1989 1990 static coroutine_fn int 1991 qcow2_co_preadv_encrypted(BlockDriverState *bs, 1992 uint64_t file_cluster_offset, 1993 uint64_t offset, 1994 uint64_t bytes, 1995 QEMUIOVector *qiov, 1996 uint64_t qiov_offset) 1997 { 1998 int ret; 1999 BDRVQcow2State *s = bs->opaque; 2000 uint8_t *buf; 2001 2002 assert(bs->encrypted && s->crypto); 2003 assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 2004 2005 /* 2006 * For encrypted images, read everything into a temporary 2007 * contiguous buffer on which the AES functions can work. 2008 * Also, decryption in a separate buffer is better as it 2009 * prevents the guest from learning information about the 2010 * encrypted nature of the virtual disk. 
2011 */ 2012 2013 buf = qemu_try_blockalign(s->data_file->bs, bytes); 2014 if (buf == NULL) { 2015 return -ENOMEM; 2016 } 2017 2018 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); 2019 ret = bdrv_co_pread(s->data_file, 2020 file_cluster_offset + offset_into_cluster(s, offset), 2021 bytes, buf, 0); 2022 if (ret < 0) { 2023 goto fail; 2024 } 2025 2026 assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)); 2027 assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE)); 2028 if (qcow2_co_decrypt(bs, 2029 file_cluster_offset + offset_into_cluster(s, offset), 2030 offset, buf, bytes) < 0) 2031 { 2032 ret = -EIO; 2033 goto fail; 2034 } 2035 qemu_iovec_from_buf(qiov, qiov_offset, buf, bytes); 2036 2037 fail: 2038 qemu_vfree(buf); 2039 2040 return ret; 2041 } 2042 2043 typedef struct Qcow2AioTask { 2044 AioTask task; 2045 2046 BlockDriverState *bs; 2047 QCow2ClusterType cluster_type; /* only for read */ 2048 uint64_t file_cluster_offset; 2049 uint64_t offset; 2050 uint64_t bytes; 2051 QEMUIOVector *qiov; 2052 uint64_t qiov_offset; 2053 QCowL2Meta *l2meta; /* only for write */ 2054 } Qcow2AioTask; 2055 2056 static coroutine_fn int qcow2_co_preadv_task_entry(AioTask *task); 2057 static coroutine_fn int qcow2_add_task(BlockDriverState *bs, 2058 AioTaskPool *pool, 2059 AioTaskFunc func, 2060 QCow2ClusterType cluster_type, 2061 uint64_t file_cluster_offset, 2062 uint64_t offset, 2063 uint64_t bytes, 2064 QEMUIOVector *qiov, 2065 size_t qiov_offset, 2066 QCowL2Meta *l2meta) 2067 { 2068 Qcow2AioTask local_task; 2069 Qcow2AioTask *task = pool ? g_new(Qcow2AioTask, 1) : &local_task; 2070 2071 *task = (Qcow2AioTask) { 2072 .task.func = func, 2073 .bs = bs, 2074 .cluster_type = cluster_type, 2075 .qiov = qiov, 2076 .file_cluster_offset = file_cluster_offset, 2077 .offset = offset, 2078 .bytes = bytes, 2079 .qiov_offset = qiov_offset, 2080 .l2meta = l2meta, 2081 }; 2082 2083 trace_qcow2_add_task(qemu_coroutine_self(), bs, pool, 2084 func == qcow2_co_preadv_task_entry ? 
"read" : "write", 2085 cluster_type, file_cluster_offset, offset, bytes, 2086 qiov, qiov_offset); 2087 2088 if (!pool) { 2089 return func(&task->task); 2090 } 2091 2092 aio_task_pool_start_task(pool, &task->task); 2093 2094 return 0; 2095 } 2096 2097 static coroutine_fn int qcow2_co_preadv_task(BlockDriverState *bs, 2098 QCow2ClusterType cluster_type, 2099 uint64_t file_cluster_offset, 2100 uint64_t offset, uint64_t bytes, 2101 QEMUIOVector *qiov, 2102 size_t qiov_offset) 2103 { 2104 BDRVQcow2State *s = bs->opaque; 2105 int offset_in_cluster = offset_into_cluster(s, offset); 2106 2107 switch (cluster_type) { 2108 case QCOW2_CLUSTER_ZERO_PLAIN: 2109 case QCOW2_CLUSTER_ZERO_ALLOC: 2110 /* Both zero types are handled in qcow2_co_preadv_part */ 2111 g_assert_not_reached(); 2112 2113 case QCOW2_CLUSTER_UNALLOCATED: 2114 assert(bs->backing); /* otherwise handled in qcow2_co_preadv_part */ 2115 2116 BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); 2117 return bdrv_co_preadv_part(bs->backing, offset, bytes, 2118 qiov, qiov_offset, 0); 2119 2120 case QCOW2_CLUSTER_COMPRESSED: 2121 return qcow2_co_preadv_compressed(bs, file_cluster_offset, 2122 offset, bytes, qiov, qiov_offset); 2123 2124 case QCOW2_CLUSTER_NORMAL: 2125 if ((file_cluster_offset & 511) != 0) { 2126 return -EIO; 2127 } 2128 2129 if (bs->encrypted) { 2130 return qcow2_co_preadv_encrypted(bs, file_cluster_offset, 2131 offset, bytes, qiov, qiov_offset); 2132 } 2133 2134 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); 2135 return bdrv_co_preadv_part(s->data_file, 2136 file_cluster_offset + offset_in_cluster, 2137 bytes, qiov, qiov_offset, 0); 2138 2139 default: 2140 g_assert_not_reached(); 2141 } 2142 2143 g_assert_not_reached(); 2144 } 2145 2146 static coroutine_fn int qcow2_co_preadv_task_entry(AioTask *task) 2147 { 2148 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task); 2149 2150 assert(!t->l2meta); 2151 2152 return qcow2_co_preadv_task(t->bs, t->cluster_type, t->file_cluster_offset, 2153 t->offset, t->bytes, t->qiov, t->qiov_offset); 2154 } 2155 2156 static coroutine_fn int qcow2_co_preadv_part(BlockDriverState *bs, 2157 uint64_t offset, uint64_t bytes, 2158 QEMUIOVector *qiov, 2159 size_t qiov_offset, int flags) 2160 { 2161 BDRVQcow2State *s = bs->opaque; 2162 int ret = 0; 2163 unsigned int cur_bytes; /* number of bytes in current iteration */ 2164 uint64_t cluster_offset = 0; 2165 AioTaskPool *aio = NULL; 2166 2167 while (bytes != 0 && aio_task_pool_status(aio) == 0) { 2168 /* prepare next request */ 2169 cur_bytes = MIN(bytes, INT_MAX); 2170 if (s->crypto) { 2171 cur_bytes = MIN(cur_bytes, 2172 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 2173 } 2174 2175 qemu_co_mutex_lock(&s->lock); 2176 ret = qcow2_get_cluster_offset(bs, offset, &cur_bytes, &cluster_offset); 2177 qemu_co_mutex_unlock(&s->lock); 2178 if (ret < 0) { 2179 goto out; 2180 } 2181 2182 if (ret == QCOW2_CLUSTER_ZERO_PLAIN || 2183 ret == QCOW2_CLUSTER_ZERO_ALLOC || 2184 (ret == QCOW2_CLUSTER_UNALLOCATED && !bs->backing)) 2185 { 2186 qemu_iovec_memset(qiov, qiov_offset, 0, cur_bytes); 2187 } else { 2188 if (!aio && cur_bytes != bytes) { 2189 aio = aio_task_pool_new(QCOW2_MAX_WORKERS); 2190 } 2191 ret = qcow2_add_task(bs, aio, qcow2_co_preadv_task_entry, ret, 2192 cluster_offset, offset, cur_bytes, 2193 qiov, qiov_offset, NULL); 2194 if (ret < 0) { 2195 goto out; 2196 } 2197 } 2198 2199 bytes -= cur_bytes; 2200 offset += cur_bytes; 2201 qiov_offset += cur_bytes; 2202 } 2203 2204 out: 2205 if (aio) { 2206 aio_task_pool_wait_all(aio); 2207 if (ret == 0) { 2208 ret = 
aio_task_pool_status(aio); 2209 } 2210 g_free(aio); 2211 } 2212 2213 return ret; 2214 } 2215 2216 /* Check if it's possible to merge a write request with the writing of 2217 * the data from the COW regions */ 2218 static bool merge_cow(uint64_t offset, unsigned bytes, 2219 QEMUIOVector *qiov, size_t qiov_offset, 2220 QCowL2Meta *l2meta) 2221 { 2222 QCowL2Meta *m; 2223 2224 for (m = l2meta; m != NULL; m = m->next) { 2225 /* If both COW regions are empty then there's nothing to merge */ 2226 if (m->cow_start.nb_bytes == 0 && m->cow_end.nb_bytes == 0) { 2227 continue; 2228 } 2229 2230 /* If COW regions are handled already, skip this too */ 2231 if (m->skip_cow) { 2232 continue; 2233 } 2234 2235 /* The data (middle) region must be immediately after the 2236 * start region */ 2237 if (l2meta_cow_start(m) + m->cow_start.nb_bytes != offset) { 2238 continue; 2239 } 2240 2241 /* The end region must be immediately after the data (middle) 2242 * region */ 2243 if (m->offset + m->cow_end.offset != offset + bytes) { 2244 continue; 2245 } 2246 2247 /* Make sure that adding both COW regions to the QEMUIOVector 2248 * does not exceed IOV_MAX */ 2249 if (qemu_iovec_subvec_niov(qiov, qiov_offset, bytes) > IOV_MAX - 2) { 2250 continue; 2251 } 2252 2253 m->data_qiov = qiov; 2254 m->data_qiov_offset = qiov_offset; 2255 return true; 2256 } 2257 2258 return false; 2259 } 2260 2261 static bool is_unallocated(BlockDriverState *bs, int64_t offset, int64_t bytes) 2262 { 2263 int64_t nr; 2264 return !bytes || 2265 (!bdrv_is_allocated_above(bs, NULL, false, offset, bytes, &nr) && 2266 nr == bytes); 2267 } 2268 2269 static bool is_zero_cow(BlockDriverState *bs, QCowL2Meta *m) 2270 { 2271 /* 2272 * This check is designed for optimization shortcut so it must be 2273 * efficient. 2274 * Instead of is_zero(), use is_unallocated() as it is faster (but not 2275 * as accurate and can result in false negatives). 2276 */ 2277 return is_unallocated(bs, m->offset + m->cow_start.offset, 2278 m->cow_start.nb_bytes) && 2279 is_unallocated(bs, m->offset + m->cow_end.offset, 2280 m->cow_end.nb_bytes); 2281 } 2282 2283 static int handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta) 2284 { 2285 BDRVQcow2State *s = bs->opaque; 2286 QCowL2Meta *m; 2287 2288 if (!(s->data_file->bs->supported_zero_flags & BDRV_REQ_NO_FALLBACK)) { 2289 return 0; 2290 } 2291 2292 if (bs->encrypted) { 2293 return 0; 2294 } 2295 2296 for (m = l2meta; m != NULL; m = m->next) { 2297 int ret; 2298 2299 if (!m->cow_start.nb_bytes && !m->cow_end.nb_bytes) { 2300 continue; 2301 } 2302 2303 if (!is_zero_cow(bs, m)) { 2304 continue; 2305 } 2306 2307 /* 2308 * instead of writing zero COW buffers, 2309 * efficiently zero out the whole clusters 2310 */ 2311 2312 ret = qcow2_pre_write_overlap_check(bs, 0, m->alloc_offset, 2313 m->nb_clusters * s->cluster_size, 2314 true); 2315 if (ret < 0) { 2316 return ret; 2317 } 2318 2319 BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_SPACE); 2320 ret = bdrv_co_pwrite_zeroes(s->data_file, m->alloc_offset, 2321 m->nb_clusters * s->cluster_size, 2322 BDRV_REQ_NO_FALLBACK); 2323 if (ret < 0) { 2324 if (ret != -ENOTSUP && ret != -EAGAIN) { 2325 return ret; 2326 } 2327 continue; 2328 } 2329 2330 trace_qcow2_skip_cow(qemu_coroutine_self(), m->offset, m->nb_clusters); 2331 m->skip_cow = true; 2332 } 2333 return 0; 2334 } 2335 2336 /* 2337 * qcow2_co_pwritev_task 2338 * Called with s->lock unlocked 2339 * l2meta - if not NULL, qcow2_co_pwritev_task() will consume it. 
Caller must 2340 * not use it somehow after qcow2_co_pwritev_task() call 2341 */ 2342 static coroutine_fn int qcow2_co_pwritev_task(BlockDriverState *bs, 2343 uint64_t file_cluster_offset, 2344 uint64_t offset, uint64_t bytes, 2345 QEMUIOVector *qiov, 2346 uint64_t qiov_offset, 2347 QCowL2Meta *l2meta) 2348 { 2349 int ret; 2350 BDRVQcow2State *s = bs->opaque; 2351 void *crypt_buf = NULL; 2352 int offset_in_cluster = offset_into_cluster(s, offset); 2353 QEMUIOVector encrypted_qiov; 2354 2355 if (bs->encrypted) { 2356 assert(s->crypto); 2357 assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 2358 crypt_buf = qemu_try_blockalign(bs->file->bs, bytes); 2359 if (crypt_buf == NULL) { 2360 ret = -ENOMEM; 2361 goto out_unlocked; 2362 } 2363 qemu_iovec_to_buf(qiov, qiov_offset, crypt_buf, bytes); 2364 2365 if (qcow2_co_encrypt(bs, file_cluster_offset + offset_in_cluster, 2366 offset, crypt_buf, bytes) < 0) 2367 { 2368 ret = -EIO; 2369 goto out_unlocked; 2370 } 2371 2372 qemu_iovec_init_buf(&encrypted_qiov, crypt_buf, bytes); 2373 qiov = &encrypted_qiov; 2374 qiov_offset = 0; 2375 } 2376 2377 /* Try to efficiently initialize the physical space with zeroes */ 2378 ret = handle_alloc_space(bs, l2meta); 2379 if (ret < 0) { 2380 goto out_unlocked; 2381 } 2382 2383 /* 2384 * If we need to do COW, check if it's possible to merge the 2385 * writing of the guest data together with that of the COW regions. 2386 * If it's not possible (or not necessary) then write the 2387 * guest data now. 2388 */ 2389 if (!merge_cow(offset, bytes, qiov, qiov_offset, l2meta)) { 2390 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); 2391 trace_qcow2_writev_data(qemu_coroutine_self(), 2392 file_cluster_offset + offset_in_cluster); 2393 ret = bdrv_co_pwritev_part(s->data_file, 2394 file_cluster_offset + offset_in_cluster, 2395 bytes, qiov, qiov_offset, 0); 2396 if (ret < 0) { 2397 goto out_unlocked; 2398 } 2399 } 2400 2401 qemu_co_mutex_lock(&s->lock); 2402 2403 ret = qcow2_handle_l2meta(bs, &l2meta, true); 2404 goto out_locked; 2405 2406 out_unlocked: 2407 qemu_co_mutex_lock(&s->lock); 2408 2409 out_locked: 2410 qcow2_handle_l2meta(bs, &l2meta, false); 2411 qemu_co_mutex_unlock(&s->lock); 2412 2413 qemu_vfree(crypt_buf); 2414 2415 return ret; 2416 } 2417 2418 static coroutine_fn int qcow2_co_pwritev_task_entry(AioTask *task) 2419 { 2420 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task); 2421 2422 assert(!t->cluster_type); 2423 2424 return qcow2_co_pwritev_task(t->bs, t->file_cluster_offset, 2425 t->offset, t->bytes, t->qiov, t->qiov_offset, 2426 t->l2meta); 2427 } 2428 2429 static coroutine_fn int qcow2_co_pwritev_part( 2430 BlockDriverState *bs, uint64_t offset, uint64_t bytes, 2431 QEMUIOVector *qiov, size_t qiov_offset, int flags) 2432 { 2433 BDRVQcow2State *s = bs->opaque; 2434 int offset_in_cluster; 2435 int ret; 2436 unsigned int cur_bytes; /* number of sectors in current iteration */ 2437 uint64_t cluster_offset; 2438 QCowL2Meta *l2meta = NULL; 2439 AioTaskPool *aio = NULL; 2440 2441 trace_qcow2_writev_start_req(qemu_coroutine_self(), offset, bytes); 2442 2443 while (bytes != 0 && aio_task_pool_status(aio) == 0) { 2444 2445 l2meta = NULL; 2446 2447 trace_qcow2_writev_start_part(qemu_coroutine_self()); 2448 offset_in_cluster = offset_into_cluster(s, offset); 2449 cur_bytes = MIN(bytes, INT_MAX); 2450 if (bs->encrypted) { 2451 cur_bytes = MIN(cur_bytes, 2452 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size 2453 - offset_in_cluster); 2454 } 2455 2456 qemu_co_mutex_lock(&s->lock); 2457 2458 ret = 
qcow2_alloc_cluster_offset(bs, offset, &cur_bytes, 2459 &cluster_offset, &l2meta); 2460 if (ret < 0) { 2461 goto out_locked; 2462 } 2463 2464 assert((cluster_offset & 511) == 0); 2465 2466 ret = qcow2_pre_write_overlap_check(bs, 0, 2467 cluster_offset + offset_in_cluster, 2468 cur_bytes, true); 2469 if (ret < 0) { 2470 goto out_locked; 2471 } 2472 2473 qemu_co_mutex_unlock(&s->lock); 2474 2475 if (!aio && cur_bytes != bytes) { 2476 aio = aio_task_pool_new(QCOW2_MAX_WORKERS); 2477 } 2478 ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_task_entry, 0, 2479 cluster_offset, offset, cur_bytes, 2480 qiov, qiov_offset, l2meta); 2481 l2meta = NULL; /* l2meta is consumed by qcow2_co_pwritev_task() */ 2482 if (ret < 0) { 2483 goto fail_nometa; 2484 } 2485 2486 bytes -= cur_bytes; 2487 offset += cur_bytes; 2488 qiov_offset += cur_bytes; 2489 trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes); 2490 } 2491 ret = 0; 2492 2493 qemu_co_mutex_lock(&s->lock); 2494 2495 out_locked: 2496 qcow2_handle_l2meta(bs, &l2meta, false); 2497 2498 qemu_co_mutex_unlock(&s->lock); 2499 2500 fail_nometa: 2501 if (aio) { 2502 aio_task_pool_wait_all(aio); 2503 if (ret == 0) { 2504 ret = aio_task_pool_status(aio); 2505 } 2506 g_free(aio); 2507 } 2508 2509 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); 2510 2511 return ret; 2512 } 2513 2514 static int qcow2_inactivate(BlockDriverState *bs) 2515 { 2516 BDRVQcow2State *s = bs->opaque; 2517 int ret, result = 0; 2518 Error *local_err = NULL; 2519 2520 qcow2_store_persistent_dirty_bitmaps(bs, true, &local_err); 2521 if (local_err != NULL) { 2522 result = -EINVAL; 2523 error_reportf_err(local_err, "Lost persistent bitmaps during " 2524 "inactivation of node '%s': ", 2525 bdrv_get_device_or_node_name(bs)); 2526 } 2527 2528 ret = qcow2_cache_flush(bs, s->l2_table_cache); 2529 if (ret) { 2530 result = ret; 2531 error_report("Failed to flush the L2 table cache: %s", 2532 strerror(-ret)); 2533 } 2534 2535 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 2536 if (ret) { 2537 result = ret; 2538 error_report("Failed to flush the refcount block cache: %s", 2539 strerror(-ret)); 2540 } 2541 2542 if (result == 0) { 2543 qcow2_mark_clean(bs); 2544 } 2545 2546 return result; 2547 } 2548 2549 static void qcow2_close(BlockDriverState *bs) 2550 { 2551 BDRVQcow2State *s = bs->opaque; 2552 qemu_vfree(s->l1_table); 2553 /* else pre-write overlap checks in cache_destroy may crash */ 2554 s->l1_table = NULL; 2555 2556 if (!(s->flags & BDRV_O_INACTIVE)) { 2557 qcow2_inactivate(bs); 2558 } 2559 2560 cache_clean_timer_del(bs); 2561 qcow2_cache_destroy(s->l2_table_cache); 2562 qcow2_cache_destroy(s->refcount_block_cache); 2563 2564 qcrypto_block_free(s->crypto); 2565 s->crypto = NULL; 2566 2567 g_free(s->unknown_header_fields); 2568 cleanup_unknown_header_ext(bs); 2569 2570 g_free(s->image_data_file); 2571 g_free(s->image_backing_file); 2572 g_free(s->image_backing_format); 2573 2574 if (has_data_file(bs)) { 2575 bdrv_unref_child(bs, s->data_file); 2576 } 2577 2578 qcow2_refcount_close(bs); 2579 qcow2_free_snapshots(bs); 2580 } 2581 2582 static void coroutine_fn qcow2_co_invalidate_cache(BlockDriverState *bs, 2583 Error **errp) 2584 { 2585 BDRVQcow2State *s = bs->opaque; 2586 int flags = s->flags; 2587 QCryptoBlock *crypto = NULL; 2588 QDict *options; 2589 Error *local_err = NULL; 2590 int ret; 2591 2592 /* 2593 * Backing files are read-only which makes all of their metadata immutable, 2594 * that means we don't have to worry about reopening them here. 
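     *
     * Only the qcow2 layer itself is reopened below: the crypto context is
     * set aside, the driver state is torn down with qcow2_close() and
     * zeroed, and then rebuilt by running qcow2_do_open() again on the
     * same file child.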
2595 */ 2596 2597 crypto = s->crypto; 2598 s->crypto = NULL; 2599 2600 qcow2_close(bs); 2601 2602 memset(s, 0, sizeof(BDRVQcow2State)); 2603 options = qdict_clone_shallow(bs->options); 2604 2605 flags &= ~BDRV_O_INACTIVE; 2606 qemu_co_mutex_lock(&s->lock); 2607 ret = qcow2_do_open(bs, options, flags, &local_err); 2608 qemu_co_mutex_unlock(&s->lock); 2609 qobject_unref(options); 2610 if (local_err) { 2611 error_propagate_prepend(errp, local_err, 2612 "Could not reopen qcow2 layer: "); 2613 bs->drv = NULL; 2614 return; 2615 } else if (ret < 0) { 2616 error_setg_errno(errp, -ret, "Could not reopen qcow2 layer"); 2617 bs->drv = NULL; 2618 return; 2619 } 2620 2621 s->crypto = crypto; 2622 } 2623 2624 static size_t header_ext_add(char *buf, uint32_t magic, const void *s, 2625 size_t len, size_t buflen) 2626 { 2627 QCowExtension *ext_backing_fmt = (QCowExtension*) buf; 2628 size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7); 2629 2630 if (buflen < ext_len) { 2631 return -ENOSPC; 2632 } 2633 2634 *ext_backing_fmt = (QCowExtension) { 2635 .magic = cpu_to_be32(magic), 2636 .len = cpu_to_be32(len), 2637 }; 2638 2639 if (len) { 2640 memcpy(buf + sizeof(QCowExtension), s, len); 2641 } 2642 2643 return ext_len; 2644 } 2645 2646 /* 2647 * Updates the qcow2 header, including the variable length parts of it, i.e. 2648 * the backing file name and all extensions. qcow2 was not designed to allow 2649 * such changes, so if we run out of space (we can only use the first cluster) 2650 * this function may fail. 2651 * 2652 * Returns 0 on success, -errno in error cases. 2653 */ 2654 int qcow2_update_header(BlockDriverState *bs) 2655 { 2656 BDRVQcow2State *s = bs->opaque; 2657 QCowHeader *header; 2658 char *buf; 2659 size_t buflen = s->cluster_size; 2660 int ret; 2661 uint64_t total_size; 2662 uint32_t refcount_table_clusters; 2663 size_t header_length; 2664 Qcow2UnknownHeaderExtension *uext; 2665 2666 buf = qemu_blockalign(bs, buflen); 2667 2668 /* Header structure */ 2669 header = (QCowHeader*) buf; 2670 2671 if (buflen < sizeof(*header)) { 2672 ret = -ENOSPC; 2673 goto fail; 2674 } 2675 2676 header_length = sizeof(*header) + s->unknown_header_fields_size; 2677 total_size = bs->total_sectors * BDRV_SECTOR_SIZE; 2678 refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3); 2679 2680 *header = (QCowHeader) { 2681 /* Version 2 fields */ 2682 .magic = cpu_to_be32(QCOW_MAGIC), 2683 .version = cpu_to_be32(s->qcow_version), 2684 .backing_file_offset = 0, 2685 .backing_file_size = 0, 2686 .cluster_bits = cpu_to_be32(s->cluster_bits), 2687 .size = cpu_to_be64(total_size), 2688 .crypt_method = cpu_to_be32(s->crypt_method_header), 2689 .l1_size = cpu_to_be32(s->l1_size), 2690 .l1_table_offset = cpu_to_be64(s->l1_table_offset), 2691 .refcount_table_offset = cpu_to_be64(s->refcount_table_offset), 2692 .refcount_table_clusters = cpu_to_be32(refcount_table_clusters), 2693 .nb_snapshots = cpu_to_be32(s->nb_snapshots), 2694 .snapshots_offset = cpu_to_be64(s->snapshots_offset), 2695 2696 /* Version 3 fields */ 2697 .incompatible_features = cpu_to_be64(s->incompatible_features), 2698 .compatible_features = cpu_to_be64(s->compatible_features), 2699 .autoclear_features = cpu_to_be64(s->autoclear_features), 2700 .refcount_order = cpu_to_be32(s->refcount_order), 2701 .header_length = cpu_to_be32(header_length), 2702 }; 2703 2704 /* For older versions, write a shorter header */ 2705 switch (s->qcow_version) { 2706 case 2: 2707 ret = offsetof(QCowHeader, incompatible_features); 2708 break; 2709 case 3: 2710 ret = 
sizeof(*header); 2711 break; 2712 default: 2713 ret = -EINVAL; 2714 goto fail; 2715 } 2716 2717 buf += ret; 2718 buflen -= ret; 2719 memset(buf, 0, buflen); 2720 2721 /* Preserve any unknown field in the header */ 2722 if (s->unknown_header_fields_size) { 2723 if (buflen < s->unknown_header_fields_size) { 2724 ret = -ENOSPC; 2725 goto fail; 2726 } 2727 2728 memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size); 2729 buf += s->unknown_header_fields_size; 2730 buflen -= s->unknown_header_fields_size; 2731 } 2732 2733 /* Backing file format header extension */ 2734 if (s->image_backing_format) { 2735 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT, 2736 s->image_backing_format, 2737 strlen(s->image_backing_format), 2738 buflen); 2739 if (ret < 0) { 2740 goto fail; 2741 } 2742 2743 buf += ret; 2744 buflen -= ret; 2745 } 2746 2747 /* External data file header extension */ 2748 if (has_data_file(bs) && s->image_data_file) { 2749 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_DATA_FILE, 2750 s->image_data_file, strlen(s->image_data_file), 2751 buflen); 2752 if (ret < 0) { 2753 goto fail; 2754 } 2755 2756 buf += ret; 2757 buflen -= ret; 2758 } 2759 2760 /* Full disk encryption header pointer extension */ 2761 if (s->crypto_header.offset != 0) { 2762 s->crypto_header.offset = cpu_to_be64(s->crypto_header.offset); 2763 s->crypto_header.length = cpu_to_be64(s->crypto_header.length); 2764 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_CRYPTO_HEADER, 2765 &s->crypto_header, sizeof(s->crypto_header), 2766 buflen); 2767 s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset); 2768 s->crypto_header.length = be64_to_cpu(s->crypto_header.length); 2769 if (ret < 0) { 2770 goto fail; 2771 } 2772 buf += ret; 2773 buflen -= ret; 2774 } 2775 2776 /* Feature table */ 2777 if (s->qcow_version >= 3) { 2778 Qcow2Feature features[] = { 2779 { 2780 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2781 .bit = QCOW2_INCOMPAT_DIRTY_BITNR, 2782 .name = "dirty bit", 2783 }, 2784 { 2785 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2786 .bit = QCOW2_INCOMPAT_CORRUPT_BITNR, 2787 .name = "corrupt bit", 2788 }, 2789 { 2790 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2791 .bit = QCOW2_INCOMPAT_DATA_FILE_BITNR, 2792 .name = "external data file", 2793 }, 2794 { 2795 .type = QCOW2_FEAT_TYPE_COMPATIBLE, 2796 .bit = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR, 2797 .name = "lazy refcounts", 2798 }, 2799 }; 2800 2801 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE, 2802 features, sizeof(features), buflen); 2803 if (ret < 0) { 2804 goto fail; 2805 } 2806 buf += ret; 2807 buflen -= ret; 2808 } 2809 2810 /* Bitmap extension */ 2811 if (s->nb_bitmaps > 0) { 2812 Qcow2BitmapHeaderExt bitmaps_header = { 2813 .nb_bitmaps = cpu_to_be32(s->nb_bitmaps), 2814 .bitmap_directory_size = 2815 cpu_to_be64(s->bitmap_directory_size), 2816 .bitmap_directory_offset = 2817 cpu_to_be64(s->bitmap_directory_offset) 2818 }; 2819 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BITMAPS, 2820 &bitmaps_header, sizeof(bitmaps_header), 2821 buflen); 2822 if (ret < 0) { 2823 goto fail; 2824 } 2825 buf += ret; 2826 buflen -= ret; 2827 } 2828 2829 /* Keep unknown header extensions */ 2830 QLIST_FOREACH(uext, &s->unknown_header_ext, next) { 2831 ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen); 2832 if (ret < 0) { 2833 goto fail; 2834 } 2835 2836 buf += ret; 2837 buflen -= ret; 2838 } 2839 2840 /* End of header extensions */ 2841 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen); 2842 if (ret < 0) { 2843 goto fail; 2844 } 2845 2846 buf += ret; 
2847 buflen -= ret; 2848 2849 /* Backing file name */ 2850 if (s->image_backing_file) { 2851 size_t backing_file_len = strlen(s->image_backing_file); 2852 2853 if (buflen < backing_file_len) { 2854 ret = -ENOSPC; 2855 goto fail; 2856 } 2857 2858 /* Using strncpy is ok here, since buf is not NUL-terminated. */ 2859 strncpy(buf, s->image_backing_file, buflen); 2860 2861 header->backing_file_offset = cpu_to_be64(buf - ((char*) header)); 2862 header->backing_file_size = cpu_to_be32(backing_file_len); 2863 } 2864 2865 /* Write the new header */ 2866 ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size); 2867 if (ret < 0) { 2868 goto fail; 2869 } 2870 2871 ret = 0; 2872 fail: 2873 qemu_vfree(header); 2874 return ret; 2875 } 2876 2877 static int qcow2_change_backing_file(BlockDriverState *bs, 2878 const char *backing_file, const char *backing_fmt) 2879 { 2880 BDRVQcow2State *s = bs->opaque; 2881 2882 /* Adding a backing file means that the external data file alone won't be 2883 * enough to make sense of the content */ 2884 if (backing_file && data_file_is_raw(bs)) { 2885 return -EINVAL; 2886 } 2887 2888 if (backing_file && strlen(backing_file) > 1023) { 2889 return -EINVAL; 2890 } 2891 2892 pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file), 2893 backing_file ?: ""); 2894 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: ""); 2895 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: ""); 2896 2897 g_free(s->image_backing_file); 2898 g_free(s->image_backing_format); 2899 2900 s->image_backing_file = backing_file ? g_strdup(bs->backing_file) : NULL; 2901 s->image_backing_format = backing_fmt ? g_strdup(bs->backing_format) : NULL; 2902 2903 return qcow2_update_header(bs); 2904 } 2905 2906 static int qcow2_crypt_method_from_format(const char *encryptfmt) 2907 { 2908 if (g_str_equal(encryptfmt, "luks")) { 2909 return QCOW_CRYPT_LUKS; 2910 } else if (g_str_equal(encryptfmt, "aes")) { 2911 return QCOW_CRYPT_AES; 2912 } else { 2913 return -EINVAL; 2914 } 2915 } 2916 2917 static int qcow2_set_up_encryption(BlockDriverState *bs, 2918 QCryptoBlockCreateOptions *cryptoopts, 2919 Error **errp) 2920 { 2921 BDRVQcow2State *s = bs->opaque; 2922 QCryptoBlock *crypto = NULL; 2923 int fmt, ret; 2924 2925 switch (cryptoopts->format) { 2926 case Q_CRYPTO_BLOCK_FORMAT_LUKS: 2927 fmt = QCOW_CRYPT_LUKS; 2928 break; 2929 case Q_CRYPTO_BLOCK_FORMAT_QCOW: 2930 fmt = QCOW_CRYPT_AES; 2931 break; 2932 default: 2933 error_setg(errp, "Crypto format not supported in qcow2"); 2934 return -EINVAL; 2935 } 2936 2937 s->crypt_method_header = fmt; 2938 2939 crypto = qcrypto_block_create(cryptoopts, "encrypt.", 2940 qcow2_crypto_hdr_init_func, 2941 qcow2_crypto_hdr_write_func, 2942 bs, errp); 2943 if (!crypto) { 2944 return -EINVAL; 2945 } 2946 2947 ret = qcow2_update_header(bs); 2948 if (ret < 0) { 2949 error_setg_errno(errp, -ret, "Could not write encryption header"); 2950 goto out; 2951 } 2952 2953 ret = 0; 2954 out: 2955 qcrypto_block_free(crypto); 2956 return ret; 2957 } 2958 2959 /** 2960 * Preallocates metadata structures for data clusters between @offset (in the 2961 * guest disk) and @new_length (which is thus generally the new guest disk 2962 * size). 2963 * 2964 * Returns: 0 on success, -errno on failure. 
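 *
 * For example (illustrative): when qcow2_co_truncate() grows an image with
 * metadata preallocation, it calls preallocate_co(bs, old_length, new_size,
 * PREALLOC_MODE_METADATA, errp); the loop below then allocates and links the
 * required L2 entries cluster by cluster without writing any guest data.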
2965 */ 2966 static int coroutine_fn preallocate_co(BlockDriverState *bs, uint64_t offset, 2967 uint64_t new_length, PreallocMode mode, 2968 Error **errp) 2969 { 2970 BDRVQcow2State *s = bs->opaque; 2971 uint64_t bytes; 2972 uint64_t host_offset = 0; 2973 int64_t file_length; 2974 unsigned int cur_bytes; 2975 int ret; 2976 QCowL2Meta *meta; 2977 2978 assert(offset <= new_length); 2979 bytes = new_length - offset; 2980 2981 while (bytes) { 2982 cur_bytes = MIN(bytes, QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size)); 2983 ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes, 2984 &host_offset, &meta); 2985 if (ret < 0) { 2986 error_setg_errno(errp, -ret, "Allocating clusters failed"); 2987 return ret; 2988 } 2989 2990 while (meta) { 2991 QCowL2Meta *next = meta->next; 2992 2993 ret = qcow2_alloc_cluster_link_l2(bs, meta); 2994 if (ret < 0) { 2995 error_setg_errno(errp, -ret, "Mapping clusters failed"); 2996 qcow2_free_any_clusters(bs, meta->alloc_offset, 2997 meta->nb_clusters, QCOW2_DISCARD_NEVER); 2998 return ret; 2999 } 3000 3001 /* There are no dependent requests, but we need to remove our 3002 * request from the list of in-flight requests */ 3003 QLIST_REMOVE(meta, next_in_flight); 3004 3005 g_free(meta); 3006 meta = next; 3007 } 3008 3009 /* TODO Preallocate data if requested */ 3010 3011 bytes -= cur_bytes; 3012 offset += cur_bytes; 3013 } 3014 3015 /* 3016 * It is expected that the image file is large enough to actually contain 3017 * all of the allocated clusters (otherwise we get failing reads after 3018 * EOF). Extend the image to the last allocated sector. 3019 */ 3020 file_length = bdrv_getlength(s->data_file->bs); 3021 if (file_length < 0) { 3022 error_setg_errno(errp, -file_length, "Could not get file size"); 3023 return file_length; 3024 } 3025 3026 if (host_offset + cur_bytes > file_length) { 3027 if (mode == PREALLOC_MODE_METADATA) { 3028 mode = PREALLOC_MODE_OFF; 3029 } 3030 ret = bdrv_co_truncate(s->data_file, host_offset + cur_bytes, mode, 3031 errp); 3032 if (ret < 0) { 3033 return ret; 3034 } 3035 } 3036 3037 return 0; 3038 } 3039 3040 /* qcow2_refcount_metadata_size: 3041 * @clusters: number of clusters to refcount (including data and L1/L2 tables) 3042 * @cluster_size: size of a cluster, in bytes 3043 * @refcount_order: refcount bits power-of-2 exponent 3044 * @generous_increase: allow for the refcount table to be 1.5x as large as it 3045 * needs to be 3046 * 3047 * Returns: Number of bytes required for refcount blocks and table metadata. 3048 */ 3049 int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size, 3050 int refcount_order, bool generous_increase, 3051 uint64_t *refblock_count) 3052 { 3053 /* 3054 * Every host cluster is reference-counted, including metadata (even 3055 * refcount metadata is recursively included). 3056 * 3057 * An accurate formula for the size of refcount metadata size is difficult 3058 * to derive. An easier method of calculation is finding the fixed point 3059 * where no further refcount blocks or table clusters are required to 3060 * reference count every cluster. 
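     *
     * Worked example (illustrative): with 64 KiB clusters and
     * refcount_order == 4 (16-bit refcounts), refcounts_per_block is
     * 65536 * 8 / 16 == 32768 and blocks_per_table_cluster is
     * 65536 / 8 == 8192. For clusters == 163840 (10 GiB of 64 KiB
     * clusters) the loop below converges at 6 refcount block clusters plus
     * 1 refcount table cluster, i.e. 7 * 65536 == 458752 bytes.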
3061 */ 3062 int64_t blocks_per_table_cluster = cluster_size / sizeof(uint64_t); 3063 int64_t refcounts_per_block = cluster_size * 8 / (1 << refcount_order); 3064 int64_t table = 0; /* number of refcount table clusters */ 3065 int64_t blocks = 0; /* number of refcount block clusters */ 3066 int64_t last; 3067 int64_t n = 0; 3068 3069 do { 3070 last = n; 3071 blocks = DIV_ROUND_UP(clusters + table + blocks, refcounts_per_block); 3072 table = DIV_ROUND_UP(blocks, blocks_per_table_cluster); 3073 n = clusters + blocks + table; 3074 3075 if (n == last && generous_increase) { 3076 clusters += DIV_ROUND_UP(table, 2); 3077 n = 0; /* force another loop */ 3078 generous_increase = false; 3079 } 3080 } while (n != last); 3081 3082 if (refblock_count) { 3083 *refblock_count = blocks; 3084 } 3085 3086 return (blocks + table) * cluster_size; 3087 } 3088 3089 /** 3090 * qcow2_calc_prealloc_size: 3091 * @total_size: virtual disk size in bytes 3092 * @cluster_size: cluster size in bytes 3093 * @refcount_order: refcount bits power-of-2 exponent 3094 * 3095 * Returns: Total number of bytes required for the fully allocated image 3096 * (including metadata). 3097 */ 3098 static int64_t qcow2_calc_prealloc_size(int64_t total_size, 3099 size_t cluster_size, 3100 int refcount_order) 3101 { 3102 int64_t meta_size = 0; 3103 uint64_t nl1e, nl2e; 3104 int64_t aligned_total_size = ROUND_UP(total_size, cluster_size); 3105 3106 /* header: 1 cluster */ 3107 meta_size += cluster_size; 3108 3109 /* total size of L2 tables */ 3110 nl2e = aligned_total_size / cluster_size; 3111 nl2e = ROUND_UP(nl2e, cluster_size / sizeof(uint64_t)); 3112 meta_size += nl2e * sizeof(uint64_t); 3113 3114 /* total size of L1 tables */ 3115 nl1e = nl2e * sizeof(uint64_t) / cluster_size; 3116 nl1e = ROUND_UP(nl1e, cluster_size / sizeof(uint64_t)); 3117 meta_size += nl1e * sizeof(uint64_t); 3118 3119 /* total size of refcount table and blocks */ 3120 meta_size += qcow2_refcount_metadata_size( 3121 (meta_size + aligned_total_size) / cluster_size, 3122 cluster_size, refcount_order, false, NULL); 3123 3124 return meta_size + aligned_total_size; 3125 } 3126 3127 static bool validate_cluster_size(size_t cluster_size, Error **errp) 3128 { 3129 int cluster_bits = ctz32(cluster_size); 3130 if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS || 3131 (1 << cluster_bits) != cluster_size) 3132 { 3133 error_setg(errp, "Cluster size must be a power of two between %d and " 3134 "%dk", 1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10)); 3135 return false; 3136 } 3137 return true; 3138 } 3139 3140 static size_t qcow2_opt_get_cluster_size_del(QemuOpts *opts, Error **errp) 3141 { 3142 size_t cluster_size; 3143 3144 cluster_size = qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE, 3145 DEFAULT_CLUSTER_SIZE); 3146 if (!validate_cluster_size(cluster_size, errp)) { 3147 return 0; 3148 } 3149 return cluster_size; 3150 } 3151 3152 static int qcow2_opt_get_version_del(QemuOpts *opts, Error **errp) 3153 { 3154 char *buf; 3155 int ret; 3156 3157 buf = qemu_opt_get_del(opts, BLOCK_OPT_COMPAT_LEVEL); 3158 if (!buf) { 3159 ret = 3; /* default */ 3160 } else if (!strcmp(buf, "0.10")) { 3161 ret = 2; 3162 } else if (!strcmp(buf, "1.1")) { 3163 ret = 3; 3164 } else { 3165 error_setg(errp, "Invalid compatibility level: '%s'", buf); 3166 ret = -EINVAL; 3167 } 3168 g_free(buf); 3169 return ret; 3170 } 3171 3172 static uint64_t qcow2_opt_get_refcount_bits_del(QemuOpts *opts, int version, 3173 Error **errp) 3174 { 3175 uint64_t refcount_bits; 3176 3177 
    refcount_bits = qemu_opt_get_number_del(opts, BLOCK_OPT_REFCOUNT_BITS, 16);
    if (refcount_bits > 64 || !is_power_of_2(refcount_bits)) {
        error_setg(errp, "Refcount width must be a power of two and may not "
                   "exceed 64 bits");
        return 0;
    }

    if (version < 3 && refcount_bits != 16) {
        error_setg(errp, "Different refcount widths than 16 bits require "
                   "compatibility level 1.1 or above (use compat=1.1 or "
                   "greater)");
        return 0;
    }

    return refcount_bits;
}

static int coroutine_fn
qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp)
{
    BlockdevCreateOptionsQcow2 *qcow2_opts;
    QDict *options;

    /*
     * Open the image file and write a minimal qcow2 header.
     *
     * We keep things simple and start with a zero-sized image. We also
     * do without refcount blocks or an L1 table for now. We'll fix the
     * inconsistency later.
     *
     * We do need a refcount table because growing the refcount table means
     * allocating two new refcount blocks - the second of which would be at
     * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file
     * size for any qcow2 image.
     */
    BlockBackend *blk = NULL;
    BlockDriverState *bs = NULL;
    BlockDriverState *data_bs = NULL;
    QCowHeader *header;
    size_t cluster_size;
    int version;
    int refcount_order;
    uint64_t* refcount_table;
    Error *local_err = NULL;
    int ret;

    assert(create_options->driver == BLOCKDEV_DRIVER_QCOW2);
    qcow2_opts = &create_options->u.qcow2;

    bs = bdrv_open_blockdev_ref(qcow2_opts->file, errp);
    if (bs == NULL) {
        return -EIO;
    }

    /* Validate options and set default values */
    if (!QEMU_IS_ALIGNED(qcow2_opts->size, BDRV_SECTOR_SIZE)) {
        error_setg(errp, "Image size must be a multiple of 512 bytes");
        ret = -EINVAL;
        goto out;
    }

    if (qcow2_opts->has_version) {
        switch (qcow2_opts->version) {
        case BLOCKDEV_QCOW2_VERSION_V2:
            version = 2;
            break;
        case BLOCKDEV_QCOW2_VERSION_V3:
            version = 3;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        version = 3;
    }

    if (qcow2_opts->has_cluster_size) {
        cluster_size = qcow2_opts->cluster_size;
    } else {
        cluster_size = DEFAULT_CLUSTER_SIZE;
    }

    if (!validate_cluster_size(cluster_size, errp)) {
        ret = -EINVAL;
        goto out;
    }

    if (!qcow2_opts->has_preallocation) {
        qcow2_opts->preallocation = PREALLOC_MODE_OFF;
    }
    if (qcow2_opts->has_backing_file &&
        qcow2_opts->preallocation != PREALLOC_MODE_OFF)
    {
        error_setg(errp, "Backing file and preallocation cannot be used at "
                   "the same time");
        ret = -EINVAL;
        goto out;
    }
    if (qcow2_opts->has_backing_fmt && !qcow2_opts->has_backing_file) {
        error_setg(errp, "Backing format cannot be used without backing file");
        ret = -EINVAL;
        goto out;
    }

    if (!qcow2_opts->has_lazy_refcounts) {
        qcow2_opts->lazy_refcounts = false;
    }
    if (version < 3 && qcow2_opts->lazy_refcounts) {
        error_setg(errp, "Lazy refcounts only supported with compatibility "
                   "level 1.1 and above (use version=v3 or greater)");
        ret = -EINVAL;
        goto out;
    }

    if (!qcow2_opts->has_refcount_bits) {
        qcow2_opts->refcount_bits = 16;
    }
    if (qcow2_opts->refcount_bits > 64 ||
!is_power_of_2(qcow2_opts->refcount_bits)) 3296 { 3297 error_setg(errp, "Refcount width must be a power of two and may not " 3298 "exceed 64 bits"); 3299 ret = -EINVAL; 3300 goto out; 3301 } 3302 if (version < 3 && qcow2_opts->refcount_bits != 16) { 3303 error_setg(errp, "Different refcount widths than 16 bits require " 3304 "compatibility level 1.1 or above (use version=v3 or " 3305 "greater)"); 3306 ret = -EINVAL; 3307 goto out; 3308 } 3309 refcount_order = ctz32(qcow2_opts->refcount_bits); 3310 3311 if (qcow2_opts->data_file_raw && !qcow2_opts->data_file) { 3312 error_setg(errp, "data-file-raw requires data-file"); 3313 ret = -EINVAL; 3314 goto out; 3315 } 3316 if (qcow2_opts->data_file_raw && qcow2_opts->has_backing_file) { 3317 error_setg(errp, "Backing file and data-file-raw cannot be used at " 3318 "the same time"); 3319 ret = -EINVAL; 3320 goto out; 3321 } 3322 3323 if (qcow2_opts->data_file) { 3324 if (version < 3) { 3325 error_setg(errp, "External data files are only supported with " 3326 "compatibility level 1.1 and above (use version=v3 or " 3327 "greater)"); 3328 ret = -EINVAL; 3329 goto out; 3330 } 3331 data_bs = bdrv_open_blockdev_ref(qcow2_opts->data_file, errp); 3332 if (data_bs == NULL) { 3333 ret = -EIO; 3334 goto out; 3335 } 3336 } 3337 3338 /* Create BlockBackend to write to the image */ 3339 blk = blk_new(bdrv_get_aio_context(bs), 3340 BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL); 3341 ret = blk_insert_bs(blk, bs, errp); 3342 if (ret < 0) { 3343 goto out; 3344 } 3345 blk_set_allow_write_beyond_eof(blk, true); 3346 3347 /* Clear the protocol layer and preallocate it if necessary */ 3348 ret = blk_truncate(blk, 0, PREALLOC_MODE_OFF, errp); 3349 if (ret < 0) { 3350 goto out; 3351 } 3352 3353 /* Write the header */ 3354 QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS) < sizeof(*header)); 3355 header = g_malloc0(cluster_size); 3356 *header = (QCowHeader) { 3357 .magic = cpu_to_be32(QCOW_MAGIC), 3358 .version = cpu_to_be32(version), 3359 .cluster_bits = cpu_to_be32(ctz32(cluster_size)), 3360 .size = cpu_to_be64(0), 3361 .l1_table_offset = cpu_to_be64(0), 3362 .l1_size = cpu_to_be32(0), 3363 .refcount_table_offset = cpu_to_be64(cluster_size), 3364 .refcount_table_clusters = cpu_to_be32(1), 3365 .refcount_order = cpu_to_be32(refcount_order), 3366 .header_length = cpu_to_be32(sizeof(*header)), 3367 }; 3368 3369 /* We'll update this to correct value later */ 3370 header->crypt_method = cpu_to_be32(QCOW_CRYPT_NONE); 3371 3372 if (qcow2_opts->lazy_refcounts) { 3373 header->compatible_features |= 3374 cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS); 3375 } 3376 if (data_bs) { 3377 header->incompatible_features |= 3378 cpu_to_be64(QCOW2_INCOMPAT_DATA_FILE); 3379 } 3380 if (qcow2_opts->data_file_raw) { 3381 header->autoclear_features |= 3382 cpu_to_be64(QCOW2_AUTOCLEAR_DATA_FILE_RAW); 3383 } 3384 3385 ret = blk_pwrite(blk, 0, header, cluster_size, 0); 3386 g_free(header); 3387 if (ret < 0) { 3388 error_setg_errno(errp, -ret, "Could not write qcow2 header"); 3389 goto out; 3390 } 3391 3392 /* Write a refcount table with one refcount block */ 3393 refcount_table = g_malloc0(2 * cluster_size); 3394 refcount_table[0] = cpu_to_be64(2 * cluster_size); 3395 ret = blk_pwrite(blk, cluster_size, refcount_table, 2 * cluster_size, 0); 3396 g_free(refcount_table); 3397 3398 if (ret < 0) { 3399 error_setg_errno(errp, -ret, "Could not write refcount table"); 3400 goto out; 3401 } 3402 3403 blk_unref(blk); 3404 blk = NULL; 3405 3406 /* 3407 * And now open the image and make it consistent first (i.e. 
increase the 3408 * refcount of the cluster that is occupied by the header and the refcount 3409 * table) 3410 */ 3411 options = qdict_new(); 3412 qdict_put_str(options, "driver", "qcow2"); 3413 qdict_put_str(options, "file", bs->node_name); 3414 if (data_bs) { 3415 qdict_put_str(options, "data-file", data_bs->node_name); 3416 } 3417 blk = blk_new_open(NULL, NULL, options, 3418 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_NO_FLUSH, 3419 &local_err); 3420 if (blk == NULL) { 3421 error_propagate(errp, local_err); 3422 ret = -EIO; 3423 goto out; 3424 } 3425 3426 ret = qcow2_alloc_clusters(blk_bs(blk), 3 * cluster_size); 3427 if (ret < 0) { 3428 error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 " 3429 "header and refcount table"); 3430 goto out; 3431 3432 } else if (ret != 0) { 3433 error_report("Huh, first cluster in empty image is already in use?"); 3434 abort(); 3435 } 3436 3437 /* Set the external data file if necessary */ 3438 if (data_bs) { 3439 BDRVQcow2State *s = blk_bs(blk)->opaque; 3440 s->image_data_file = g_strdup(data_bs->filename); 3441 } 3442 3443 /* Create a full header (including things like feature table) */ 3444 ret = qcow2_update_header(blk_bs(blk)); 3445 if (ret < 0) { 3446 error_setg_errno(errp, -ret, "Could not update qcow2 header"); 3447 goto out; 3448 } 3449 3450 /* Okay, now that we have a valid image, let's give it the right size */ 3451 ret = blk_truncate(blk, qcow2_opts->size, qcow2_opts->preallocation, errp); 3452 if (ret < 0) { 3453 error_prepend(errp, "Could not resize image: "); 3454 goto out; 3455 } 3456 3457 /* Want a backing file? There you go.*/ 3458 if (qcow2_opts->has_backing_file) { 3459 const char *backing_format = NULL; 3460 3461 if (qcow2_opts->has_backing_fmt) { 3462 backing_format = BlockdevDriver_str(qcow2_opts->backing_fmt); 3463 } 3464 3465 ret = bdrv_change_backing_file(blk_bs(blk), qcow2_opts->backing_file, 3466 backing_format); 3467 if (ret < 0) { 3468 error_setg_errno(errp, -ret, "Could not assign backing file '%s' " 3469 "with format '%s'", qcow2_opts->backing_file, 3470 backing_format); 3471 goto out; 3472 } 3473 } 3474 3475 /* Want encryption? There you go. */ 3476 if (qcow2_opts->has_encrypt) { 3477 ret = qcow2_set_up_encryption(blk_bs(blk), qcow2_opts->encrypt, errp); 3478 if (ret < 0) { 3479 goto out; 3480 } 3481 } 3482 3483 blk_unref(blk); 3484 blk = NULL; 3485 3486 /* Reopen the image without BDRV_O_NO_FLUSH to flush it before returning. 3487 * Using BDRV_O_NO_IO, since encryption is now setup we don't want to 3488 * have to setup decryption context. We're not doing any I/O on the top 3489 * level BlockDriverState, only lower layers, where BDRV_O_NO_IO does 3490 * not have effect. 
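     *
     * The BlockBackend opened here is dropped again at the end of the
     * function; this open/close cycle exists only so that the image gets
     * flushed before qcow2_co_create() returns.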
3491 */ 3492 options = qdict_new(); 3493 qdict_put_str(options, "driver", "qcow2"); 3494 qdict_put_str(options, "file", bs->node_name); 3495 if (data_bs) { 3496 qdict_put_str(options, "data-file", data_bs->node_name); 3497 } 3498 blk = blk_new_open(NULL, NULL, options, 3499 BDRV_O_RDWR | BDRV_O_NO_BACKING | BDRV_O_NO_IO, 3500 &local_err); 3501 if (blk == NULL) { 3502 error_propagate(errp, local_err); 3503 ret = -EIO; 3504 goto out; 3505 } 3506 3507 ret = 0; 3508 out: 3509 blk_unref(blk); 3510 bdrv_unref(bs); 3511 bdrv_unref(data_bs); 3512 return ret; 3513 } 3514 3515 static int coroutine_fn qcow2_co_create_opts(const char *filename, QemuOpts *opts, 3516 Error **errp) 3517 { 3518 BlockdevCreateOptions *create_options = NULL; 3519 QDict *qdict; 3520 Visitor *v; 3521 BlockDriverState *bs = NULL; 3522 BlockDriverState *data_bs = NULL; 3523 Error *local_err = NULL; 3524 const char *val; 3525 int ret; 3526 3527 /* Only the keyval visitor supports the dotted syntax needed for 3528 * encryption, so go through a QDict before getting a QAPI type. Ignore 3529 * options meant for the protocol layer so that the visitor doesn't 3530 * complain. */ 3531 qdict = qemu_opts_to_qdict_filtered(opts, NULL, bdrv_qcow2.create_opts, 3532 true); 3533 3534 /* Handle encryption options */ 3535 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT); 3536 if (val && !strcmp(val, "on")) { 3537 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT, "qcow"); 3538 } else if (val && !strcmp(val, "off")) { 3539 qdict_del(qdict, BLOCK_OPT_ENCRYPT); 3540 } 3541 3542 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT); 3543 if (val && !strcmp(val, "aes")) { 3544 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT, "qcow"); 3545 } 3546 3547 /* Convert compat=0.10/1.1 into compat=v2/v3, to be renamed into 3548 * version=v2/v3 below. 
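     * For instance (illustrative), "compat=1.1" on the command line first
     * becomes "compat=v3" here and is then renamed to "version=v3" by the
     * opt_renames table before the QAPI visitor parses the options.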
*/ 3549 val = qdict_get_try_str(qdict, BLOCK_OPT_COMPAT_LEVEL); 3550 if (val && !strcmp(val, "0.10")) { 3551 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v2"); 3552 } else if (val && !strcmp(val, "1.1")) { 3553 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v3"); 3554 } 3555 3556 /* Change legacy command line options into QMP ones */ 3557 static const QDictRenames opt_renames[] = { 3558 { BLOCK_OPT_BACKING_FILE, "backing-file" }, 3559 { BLOCK_OPT_BACKING_FMT, "backing-fmt" }, 3560 { BLOCK_OPT_CLUSTER_SIZE, "cluster-size" }, 3561 { BLOCK_OPT_LAZY_REFCOUNTS, "lazy-refcounts" }, 3562 { BLOCK_OPT_REFCOUNT_BITS, "refcount-bits" }, 3563 { BLOCK_OPT_ENCRYPT, BLOCK_OPT_ENCRYPT_FORMAT }, 3564 { BLOCK_OPT_COMPAT_LEVEL, "version" }, 3565 { BLOCK_OPT_DATA_FILE_RAW, "data-file-raw" }, 3566 { NULL, NULL }, 3567 }; 3568 3569 if (!qdict_rename_keys(qdict, opt_renames, errp)) { 3570 ret = -EINVAL; 3571 goto finish; 3572 } 3573 3574 /* Create and open the file (protocol layer) */ 3575 ret = bdrv_create_file(filename, opts, errp); 3576 if (ret < 0) { 3577 goto finish; 3578 } 3579 3580 bs = bdrv_open(filename, NULL, NULL, 3581 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp); 3582 if (bs == NULL) { 3583 ret = -EIO; 3584 goto finish; 3585 } 3586 3587 /* Create and open an external data file (protocol layer) */ 3588 val = qdict_get_try_str(qdict, BLOCK_OPT_DATA_FILE); 3589 if (val) { 3590 ret = bdrv_create_file(val, opts, errp); 3591 if (ret < 0) { 3592 goto finish; 3593 } 3594 3595 data_bs = bdrv_open(val, NULL, NULL, 3596 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, 3597 errp); 3598 if (data_bs == NULL) { 3599 ret = -EIO; 3600 goto finish; 3601 } 3602 3603 qdict_del(qdict, BLOCK_OPT_DATA_FILE); 3604 qdict_put_str(qdict, "data-file", data_bs->node_name); 3605 } 3606 3607 /* Set 'driver' and 'node' options */ 3608 qdict_put_str(qdict, "driver", "qcow2"); 3609 qdict_put_str(qdict, "file", bs->node_name); 3610 3611 /* Now get the QAPI type BlockdevCreateOptions */ 3612 v = qobject_input_visitor_new_flat_confused(qdict, errp); 3613 if (!v) { 3614 ret = -EINVAL; 3615 goto finish; 3616 } 3617 3618 visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err); 3619 visit_free(v); 3620 3621 if (local_err) { 3622 error_propagate(errp, local_err); 3623 ret = -EINVAL; 3624 goto finish; 3625 } 3626 3627 /* Silently round up size */ 3628 create_options->u.qcow2.size = ROUND_UP(create_options->u.qcow2.size, 3629 BDRV_SECTOR_SIZE); 3630 3631 /* Create the qcow2 image (format layer) */ 3632 ret = qcow2_co_create(create_options, errp); 3633 if (ret < 0) { 3634 goto finish; 3635 } 3636 3637 ret = 0; 3638 finish: 3639 qobject_unref(qdict); 3640 bdrv_unref(bs); 3641 bdrv_unref(data_bs); 3642 qapi_free_BlockdevCreateOptions(create_options); 3643 return ret; 3644 } 3645 3646 3647 static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes) 3648 { 3649 int64_t nr; 3650 int res; 3651 3652 /* Clamp to image length, before checking status of underlying sectors */ 3653 if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) { 3654 bytes = bs->total_sectors * BDRV_SECTOR_SIZE - offset; 3655 } 3656 3657 if (!bytes) { 3658 return true; 3659 } 3660 res = bdrv_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL); 3661 return res >= 0 && (res & BDRV_BLOCK_ZERO) && nr == bytes; 3662 } 3663 3664 static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs, 3665 int64_t offset, int bytes, BdrvRequestFlags flags) 3666 { 3667 int ret; 3668 BDRVQcow2State *s = bs->opaque; 3669 3670 uint32_t head = offset 
% s->cluster_size; 3671 uint32_t tail = (offset + bytes) % s->cluster_size; 3672 3673 trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, bytes); 3674 if (offset + bytes == bs->total_sectors * BDRV_SECTOR_SIZE) { 3675 tail = 0; 3676 } 3677 3678 if (head || tail) { 3679 uint64_t off; 3680 unsigned int nr; 3681 3682 assert(head + bytes <= s->cluster_size); 3683 3684 /* check whether remainder of cluster already reads as zero */ 3685 if (!(is_zero(bs, offset - head, head) && 3686 is_zero(bs, offset + bytes, 3687 tail ? s->cluster_size - tail : 0))) { 3688 return -ENOTSUP; 3689 } 3690 3691 qemu_co_mutex_lock(&s->lock); 3692 /* We can have new write after previous check */ 3693 offset = QEMU_ALIGN_DOWN(offset, s->cluster_size); 3694 bytes = s->cluster_size; 3695 nr = s->cluster_size; 3696 ret = qcow2_get_cluster_offset(bs, offset, &nr, &off); 3697 if (ret != QCOW2_CLUSTER_UNALLOCATED && 3698 ret != QCOW2_CLUSTER_ZERO_PLAIN && 3699 ret != QCOW2_CLUSTER_ZERO_ALLOC) { 3700 qemu_co_mutex_unlock(&s->lock); 3701 return -ENOTSUP; 3702 } 3703 } else { 3704 qemu_co_mutex_lock(&s->lock); 3705 } 3706 3707 trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, bytes); 3708 3709 /* Whatever is left can use real zero clusters */ 3710 ret = qcow2_cluster_zeroize(bs, offset, bytes, flags); 3711 qemu_co_mutex_unlock(&s->lock); 3712 3713 return ret; 3714 } 3715 3716 static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs, 3717 int64_t offset, int bytes) 3718 { 3719 int ret; 3720 BDRVQcow2State *s = bs->opaque; 3721 3722 if (!QEMU_IS_ALIGNED(offset | bytes, s->cluster_size)) { 3723 assert(bytes < s->cluster_size); 3724 /* Ignore partial clusters, except for the special case of the 3725 * complete partial cluster at the end of an unaligned file */ 3726 if (!QEMU_IS_ALIGNED(offset, s->cluster_size) || 3727 offset + bytes != bs->total_sectors * BDRV_SECTOR_SIZE) { 3728 return -ENOTSUP; 3729 } 3730 } 3731 3732 qemu_co_mutex_lock(&s->lock); 3733 ret = qcow2_cluster_discard(bs, offset, bytes, QCOW2_DISCARD_REQUEST, 3734 false); 3735 qemu_co_mutex_unlock(&s->lock); 3736 return ret; 3737 } 3738 3739 static int coroutine_fn 3740 qcow2_co_copy_range_from(BlockDriverState *bs, 3741 BdrvChild *src, uint64_t src_offset, 3742 BdrvChild *dst, uint64_t dst_offset, 3743 uint64_t bytes, BdrvRequestFlags read_flags, 3744 BdrvRequestFlags write_flags) 3745 { 3746 BDRVQcow2State *s = bs->opaque; 3747 int ret; 3748 unsigned int cur_bytes; /* number of bytes in current iteration */ 3749 BdrvChild *child = NULL; 3750 BdrvRequestFlags cur_write_flags; 3751 3752 assert(!bs->encrypted); 3753 qemu_co_mutex_lock(&s->lock); 3754 3755 while (bytes != 0) { 3756 uint64_t copy_offset = 0; 3757 /* prepare next request */ 3758 cur_bytes = MIN(bytes, INT_MAX); 3759 cur_write_flags = write_flags; 3760 3761 ret = qcow2_get_cluster_offset(bs, src_offset, &cur_bytes, ©_offset); 3762 if (ret < 0) { 3763 goto out; 3764 } 3765 3766 switch (ret) { 3767 case QCOW2_CLUSTER_UNALLOCATED: 3768 if (bs->backing && bs->backing->bs) { 3769 int64_t backing_length = bdrv_getlength(bs->backing->bs); 3770 if (src_offset >= backing_length) { 3771 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 3772 } else { 3773 child = bs->backing; 3774 cur_bytes = MIN(cur_bytes, backing_length - src_offset); 3775 copy_offset = src_offset; 3776 } 3777 } else { 3778 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 3779 } 3780 break; 3781 3782 case QCOW2_CLUSTER_ZERO_PLAIN: 3783 case QCOW2_CLUSTER_ZERO_ALLOC: 3784 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 3785 break; 3786 3787 case 
QCOW2_CLUSTER_COMPRESSED: 3788 ret = -ENOTSUP; 3789 goto out; 3790 3791 case QCOW2_CLUSTER_NORMAL: 3792 child = s->data_file; 3793 copy_offset += offset_into_cluster(s, src_offset); 3794 if ((copy_offset & 511) != 0) { 3795 ret = -EIO; 3796 goto out; 3797 } 3798 break; 3799 3800 default: 3801 abort(); 3802 } 3803 qemu_co_mutex_unlock(&s->lock); 3804 ret = bdrv_co_copy_range_from(child, 3805 copy_offset, 3806 dst, dst_offset, 3807 cur_bytes, read_flags, cur_write_flags); 3808 qemu_co_mutex_lock(&s->lock); 3809 if (ret < 0) { 3810 goto out; 3811 } 3812 3813 bytes -= cur_bytes; 3814 src_offset += cur_bytes; 3815 dst_offset += cur_bytes; 3816 } 3817 ret = 0; 3818 3819 out: 3820 qemu_co_mutex_unlock(&s->lock); 3821 return ret; 3822 } 3823 3824 static int coroutine_fn 3825 qcow2_co_copy_range_to(BlockDriverState *bs, 3826 BdrvChild *src, uint64_t src_offset, 3827 BdrvChild *dst, uint64_t dst_offset, 3828 uint64_t bytes, BdrvRequestFlags read_flags, 3829 BdrvRequestFlags write_flags) 3830 { 3831 BDRVQcow2State *s = bs->opaque; 3832 int offset_in_cluster; 3833 int ret; 3834 unsigned int cur_bytes; /* number of sectors in current iteration */ 3835 uint64_t cluster_offset; 3836 QCowL2Meta *l2meta = NULL; 3837 3838 assert(!bs->encrypted); 3839 3840 qemu_co_mutex_lock(&s->lock); 3841 3842 while (bytes != 0) { 3843 3844 l2meta = NULL; 3845 3846 offset_in_cluster = offset_into_cluster(s, dst_offset); 3847 cur_bytes = MIN(bytes, INT_MAX); 3848 3849 /* TODO: 3850 * If src->bs == dst->bs, we could simply copy by incrementing 3851 * the refcnt, without copying user data. 3852 * Or if src->bs == dst->bs->backing->bs, we could copy by discarding. */ 3853 ret = qcow2_alloc_cluster_offset(bs, dst_offset, &cur_bytes, 3854 &cluster_offset, &l2meta); 3855 if (ret < 0) { 3856 goto fail; 3857 } 3858 3859 assert((cluster_offset & 511) == 0); 3860 3861 ret = qcow2_pre_write_overlap_check(bs, 0, 3862 cluster_offset + offset_in_cluster, cur_bytes, true); 3863 if (ret < 0) { 3864 goto fail; 3865 } 3866 3867 qemu_co_mutex_unlock(&s->lock); 3868 ret = bdrv_co_copy_range_to(src, src_offset, 3869 s->data_file, 3870 cluster_offset + offset_in_cluster, 3871 cur_bytes, read_flags, write_flags); 3872 qemu_co_mutex_lock(&s->lock); 3873 if (ret < 0) { 3874 goto fail; 3875 } 3876 3877 ret = qcow2_handle_l2meta(bs, &l2meta, true); 3878 if (ret) { 3879 goto fail; 3880 } 3881 3882 bytes -= cur_bytes; 3883 src_offset += cur_bytes; 3884 dst_offset += cur_bytes; 3885 } 3886 ret = 0; 3887 3888 fail: 3889 qcow2_handle_l2meta(bs, &l2meta, false); 3890 3891 qemu_co_mutex_unlock(&s->lock); 3892 3893 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); 3894 3895 return ret; 3896 } 3897 3898 static int coroutine_fn qcow2_co_truncate(BlockDriverState *bs, int64_t offset, 3899 PreallocMode prealloc, Error **errp) 3900 { 3901 BDRVQcow2State *s = bs->opaque; 3902 uint64_t old_length; 3903 int64_t new_l1_size; 3904 int ret; 3905 QDict *options; 3906 3907 if (prealloc != PREALLOC_MODE_OFF && prealloc != PREALLOC_MODE_METADATA && 3908 prealloc != PREALLOC_MODE_FALLOC && prealloc != PREALLOC_MODE_FULL) 3909 { 3910 error_setg(errp, "Unsupported preallocation mode '%s'", 3911 PreallocMode_str(prealloc)); 3912 return -ENOTSUP; 3913 } 3914 3915 if (offset & 511) { 3916 error_setg(errp, "The new size must be a multiple of 512"); 3917 return -EINVAL; 3918 } 3919 3920 qemu_co_mutex_lock(&s->lock); 3921 3922 /* cannot proceed if image has snapshots */ 3923 if (s->nb_snapshots) { 3924 error_setg(errp, "Can't resize an image which has snapshots"); 3925 ret = 
-ENOTSUP; 3926 goto fail; 3927 } 3928 3929 /* cannot proceed if image has bitmaps */ 3930 if (qcow2_truncate_bitmaps_check(bs, errp)) { 3931 ret = -ENOTSUP; 3932 goto fail; 3933 } 3934 3935 old_length = bs->total_sectors * BDRV_SECTOR_SIZE; 3936 new_l1_size = size_to_l1(s, offset); 3937 3938 if (offset < old_length) { 3939 int64_t last_cluster, old_file_size; 3940 if (prealloc != PREALLOC_MODE_OFF) { 3941 error_setg(errp, 3942 "Preallocation can't be used for shrinking an image"); 3943 ret = -EINVAL; 3944 goto fail; 3945 } 3946 3947 ret = qcow2_cluster_discard(bs, ROUND_UP(offset, s->cluster_size), 3948 old_length - ROUND_UP(offset, 3949 s->cluster_size), 3950 QCOW2_DISCARD_ALWAYS, true); 3951 if (ret < 0) { 3952 error_setg_errno(errp, -ret, "Failed to discard cropped clusters"); 3953 goto fail; 3954 } 3955 3956 ret = qcow2_shrink_l1_table(bs, new_l1_size); 3957 if (ret < 0) { 3958 error_setg_errno(errp, -ret, 3959 "Failed to reduce the number of L2 tables"); 3960 goto fail; 3961 } 3962 3963 ret = qcow2_shrink_reftable(bs); 3964 if (ret < 0) { 3965 error_setg_errno(errp, -ret, 3966 "Failed to discard unused refblocks"); 3967 goto fail; 3968 } 3969 3970 old_file_size = bdrv_getlength(bs->file->bs); 3971 if (old_file_size < 0) { 3972 error_setg_errno(errp, -old_file_size, 3973 "Failed to inquire current file length"); 3974 ret = old_file_size; 3975 goto fail; 3976 } 3977 last_cluster = qcow2_get_last_cluster(bs, old_file_size); 3978 if (last_cluster < 0) { 3979 error_setg_errno(errp, -last_cluster, 3980 "Failed to find the last cluster"); 3981 ret = last_cluster; 3982 goto fail; 3983 } 3984 if ((last_cluster + 1) * s->cluster_size < old_file_size) { 3985 Error *local_err = NULL; 3986 3987 bdrv_co_truncate(bs->file, (last_cluster + 1) * s->cluster_size, 3988 PREALLOC_MODE_OFF, &local_err); 3989 if (local_err) { 3990 warn_reportf_err(local_err, 3991 "Failed to truncate the tail of the image: "); 3992 } 3993 } 3994 } else { 3995 ret = qcow2_grow_l1_table(bs, new_l1_size, true); 3996 if (ret < 0) { 3997 error_setg_errno(errp, -ret, "Failed to grow the L1 table"); 3998 goto fail; 3999 } 4000 } 4001 4002 switch (prealloc) { 4003 case PREALLOC_MODE_OFF: 4004 if (has_data_file(bs)) { 4005 ret = bdrv_co_truncate(s->data_file, offset, prealloc, errp); 4006 if (ret < 0) { 4007 goto fail; 4008 } 4009 } 4010 break; 4011 4012 case PREALLOC_MODE_METADATA: 4013 ret = preallocate_co(bs, old_length, offset, prealloc, errp); 4014 if (ret < 0) { 4015 goto fail; 4016 } 4017 break; 4018 4019 case PREALLOC_MODE_FALLOC: 4020 case PREALLOC_MODE_FULL: 4021 { 4022 int64_t allocation_start, host_offset, guest_offset; 4023 int64_t clusters_allocated; 4024 int64_t old_file_size, new_file_size; 4025 uint64_t nb_new_data_clusters, nb_new_l2_tables; 4026 4027 /* With a data file, preallocation means just allocating the metadata 4028 * and forwarding the truncate request to the data file */ 4029 if (has_data_file(bs)) { 4030 ret = preallocate_co(bs, old_length, offset, prealloc, errp); 4031 if (ret < 0) { 4032 goto fail; 4033 } 4034 break; 4035 } 4036 4037 old_file_size = bdrv_getlength(bs->file->bs); 4038 if (old_file_size < 0) { 4039 error_setg_errno(errp, -old_file_size, 4040 "Failed to inquire current file length"); 4041 ret = old_file_size; 4042 goto fail; 4043 } 4044 old_file_size = ROUND_UP(old_file_size, s->cluster_size); 4045 4046 nb_new_data_clusters = DIV_ROUND_UP(offset - old_length, 4047 s->cluster_size); 4048 4049 /* This is an overestimation; we will not actually allocate space for 4050 * these in the file but 
just make sure the new refcount structures are 4051 * able to cover them so we will not have to allocate new refblocks 4052 * while entering the data blocks in the potentially new L2 tables. 4053 * (We do not actually care where the L2 tables are placed. Maybe they 4054 * are already allocated or they can be placed somewhere before 4055 * @old_file_size. It does not matter because they will be fully 4056 * allocated automatically, so they do not need to be covered by the 4057 * preallocation. All that matters is that we will not have to allocate 4058 * new refcount structures for them.) */ 4059 nb_new_l2_tables = DIV_ROUND_UP(nb_new_data_clusters, 4060 s->cluster_size / sizeof(uint64_t)); 4061 /* The cluster range may not be aligned to L2 boundaries, so add one L2 4062 * table for a potential head/tail */ 4063 nb_new_l2_tables++; 4064 4065 allocation_start = qcow2_refcount_area(bs, old_file_size, 4066 nb_new_data_clusters + 4067 nb_new_l2_tables, 4068 true, 0, 0); 4069 if (allocation_start < 0) { 4070 error_setg_errno(errp, -allocation_start, 4071 "Failed to resize refcount structures"); 4072 ret = allocation_start; 4073 goto fail; 4074 } 4075 4076 clusters_allocated = qcow2_alloc_clusters_at(bs, allocation_start, 4077 nb_new_data_clusters); 4078 if (clusters_allocated < 0) { 4079 error_setg_errno(errp, -clusters_allocated, 4080 "Failed to allocate data clusters"); 4081 ret = clusters_allocated; 4082 goto fail; 4083 } 4084 4085 assert(clusters_allocated == nb_new_data_clusters); 4086 4087 /* Allocate the data area */ 4088 new_file_size = allocation_start + 4089 nb_new_data_clusters * s->cluster_size; 4090 ret = bdrv_co_truncate(bs->file, new_file_size, prealloc, errp); 4091 if (ret < 0) { 4092 error_prepend(errp, "Failed to resize underlying file: "); 4093 qcow2_free_clusters(bs, allocation_start, 4094 nb_new_data_clusters * s->cluster_size, 4095 QCOW2_DISCARD_OTHER); 4096 goto fail; 4097 } 4098 4099 /* Create the necessary L2 entries */ 4100 host_offset = allocation_start; 4101 guest_offset = old_length; 4102 while (nb_new_data_clusters) { 4103 int64_t nb_clusters = MIN( 4104 nb_new_data_clusters, 4105 s->l2_slice_size - offset_to_l2_slice_index(s, guest_offset)); 4106 QCowL2Meta allocation = { 4107 .offset = guest_offset, 4108 .alloc_offset = host_offset, 4109 .nb_clusters = nb_clusters, 4110 }; 4111 qemu_co_queue_init(&allocation.dependent_requests); 4112 4113 ret = qcow2_alloc_cluster_link_l2(bs, &allocation); 4114 if (ret < 0) { 4115 error_setg_errno(errp, -ret, "Failed to update L2 tables"); 4116 qcow2_free_clusters(bs, host_offset, 4117 nb_new_data_clusters * s->cluster_size, 4118 QCOW2_DISCARD_OTHER); 4119 goto fail; 4120 } 4121 4122 guest_offset += nb_clusters * s->cluster_size; 4123 host_offset += nb_clusters * s->cluster_size; 4124 nb_new_data_clusters -= nb_clusters; 4125 } 4126 break; 4127 } 4128 4129 default: 4130 g_assert_not_reached(); 4131 } 4132 4133 if (prealloc != PREALLOC_MODE_OFF) { 4134 /* Flush metadata before actually changing the image size */ 4135 ret = qcow2_write_caches(bs); 4136 if (ret < 0) { 4137 error_setg_errno(errp, -ret, 4138 "Failed to flush the preallocated area to disk"); 4139 goto fail; 4140 } 4141 } 4142 4143 bs->total_sectors = offset / BDRV_SECTOR_SIZE; 4144 4145 /* write updated header.size */ 4146 offset = cpu_to_be64(offset); 4147 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size), 4148 &offset, sizeof(uint64_t)); 4149 if (ret < 0) { 4150 error_setg_errno(errp, -ret, "Failed to update the image size"); 4151 goto fail; 4152 } 4153 4154 
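    /*
     * The VM state (see qcow2_save_vmstate()/qcow2_load_vmstate() below) is
     * addressed relative to qcow2_vm_state_offset(), i.e. the first L1 entry
     * past the guest-visible disk, so keep this index in sync with the new
     * disk size.
     */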
s->l1_vm_state_index = new_l1_size; 4155 4156 /* Update cache sizes */ 4157 options = qdict_clone_shallow(bs->options); 4158 ret = qcow2_update_options(bs, options, s->flags, errp); 4159 qobject_unref(options); 4160 if (ret < 0) { 4161 goto fail; 4162 } 4163 ret = 0; 4164 fail: 4165 qemu_co_mutex_unlock(&s->lock); 4166 return ret; 4167 } 4168 4169 /* XXX: put compressed sectors first, then all the cluster aligned 4170 tables to avoid losing bytes in alignment */ 4171 static coroutine_fn int 4172 qcow2_co_pwritev_compressed_part(BlockDriverState *bs, 4173 uint64_t offset, uint64_t bytes, 4174 QEMUIOVector *qiov, size_t qiov_offset) 4175 { 4176 BDRVQcow2State *s = bs->opaque; 4177 int ret; 4178 ssize_t out_len; 4179 uint8_t *buf, *out_buf; 4180 uint64_t cluster_offset; 4181 4182 if (has_data_file(bs)) { 4183 return -ENOTSUP; 4184 } 4185 4186 if (bytes == 0) { 4187 /* align end of file to a sector boundary to ease reading with 4188 sector based I/Os */ 4189 int64_t len = bdrv_getlength(bs->file->bs); 4190 if (len < 0) { 4191 return len; 4192 } 4193 return bdrv_co_truncate(bs->file, len, PREALLOC_MODE_OFF, NULL); 4194 } 4195 4196 if (offset_into_cluster(s, offset)) { 4197 return -EINVAL; 4198 } 4199 4200 buf = qemu_blockalign(bs, s->cluster_size); 4201 if (bytes != s->cluster_size) { 4202 if (bytes > s->cluster_size || 4203 offset + bytes != bs->total_sectors << BDRV_SECTOR_BITS) 4204 { 4205 qemu_vfree(buf); 4206 return -EINVAL; 4207 } 4208 /* Zero-pad last write if image size is not cluster aligned */ 4209 memset(buf + bytes, 0, s->cluster_size - bytes); 4210 } 4211 qemu_iovec_to_buf(qiov, qiov_offset, buf, bytes); 4212 4213 out_buf = g_malloc(s->cluster_size); 4214 4215 out_len = qcow2_co_compress(bs, out_buf, s->cluster_size - 1, 4216 buf, s->cluster_size); 4217 if (out_len == -ENOMEM) { 4218 /* could not compress: write normal cluster */ 4219 ret = qcow2_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset, 0); 4220 if (ret < 0) { 4221 goto fail; 4222 } 4223 goto success; 4224 } else if (out_len < 0) { 4225 ret = -EINVAL; 4226 goto fail; 4227 } 4228 4229 qemu_co_mutex_lock(&s->lock); 4230 ret = qcow2_alloc_compressed_cluster_offset(bs, offset, out_len, 4231 &cluster_offset); 4232 if (ret < 0) { 4233 qemu_co_mutex_unlock(&s->lock); 4234 goto fail; 4235 } 4236 4237 ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset, out_len, true); 4238 qemu_co_mutex_unlock(&s->lock); 4239 if (ret < 0) { 4240 goto fail; 4241 } 4242 4243 BLKDBG_EVENT(s->data_file, BLKDBG_WRITE_COMPRESSED); 4244 ret = bdrv_co_pwrite(s->data_file, cluster_offset, out_len, out_buf, 0); 4245 if (ret < 0) { 4246 goto fail; 4247 } 4248 success: 4249 ret = 0; 4250 fail: 4251 qemu_vfree(buf); 4252 g_free(out_buf); 4253 return ret; 4254 } 4255 4256 static int coroutine_fn 4257 qcow2_co_preadv_compressed(BlockDriverState *bs, 4258 uint64_t file_cluster_offset, 4259 uint64_t offset, 4260 uint64_t bytes, 4261 QEMUIOVector *qiov, 4262 size_t qiov_offset) 4263 { 4264 BDRVQcow2State *s = bs->opaque; 4265 int ret = 0, csize, nb_csectors; 4266 uint64_t coffset; 4267 uint8_t *buf, *out_buf; 4268 int offset_in_cluster = offset_into_cluster(s, offset); 4269 4270 coffset = file_cluster_offset & s->cluster_offset_mask; 4271 nb_csectors = ((file_cluster_offset >> s->csize_shift) & s->csize_mask) + 1; 4272 csize = nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE - 4273 (coffset & ~QCOW2_COMPRESSED_SECTOR_MASK); 4274 4275 buf = g_try_malloc(csize); 4276 if (!buf) { 4277 return -ENOMEM; 4278 } 4279 4280 out_buf = qemu_blockalign(bs, s->cluster_size); 
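    /*
     * buf holds the compressed data exactly as it is stored in the image file
     * (csize bytes, taken from the extra bits of the L2 descriptor above);
     * out_buf receives the decompressed cluster, from which the requested
     * byte range is copied into the caller's I/O vector.
     */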
4281 4282 BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED); 4283 ret = bdrv_co_pread(bs->file, coffset, csize, buf, 0); 4284 if (ret < 0) { 4285 goto fail; 4286 } 4287 4288 if (qcow2_co_decompress(bs, out_buf, s->cluster_size, buf, csize) < 0) { 4289 ret = -EIO; 4290 goto fail; 4291 } 4292 4293 qemu_iovec_from_buf(qiov, qiov_offset, out_buf + offset_in_cluster, bytes); 4294 4295 fail: 4296 qemu_vfree(out_buf); 4297 g_free(buf); 4298 4299 return ret; 4300 } 4301 4302 static int make_completely_empty(BlockDriverState *bs) 4303 { 4304 BDRVQcow2State *s = bs->opaque; 4305 Error *local_err = NULL; 4306 int ret, l1_clusters; 4307 int64_t offset; 4308 uint64_t *new_reftable = NULL; 4309 uint64_t rt_entry, l1_size2; 4310 struct { 4311 uint64_t l1_offset; 4312 uint64_t reftable_offset; 4313 uint32_t reftable_clusters; 4314 } QEMU_PACKED l1_ofs_rt_ofs_cls; 4315 4316 ret = qcow2_cache_empty(bs, s->l2_table_cache); 4317 if (ret < 0) { 4318 goto fail; 4319 } 4320 4321 ret = qcow2_cache_empty(bs, s->refcount_block_cache); 4322 if (ret < 0) { 4323 goto fail; 4324 } 4325 4326 /* Refcounts will be broken utterly */ 4327 ret = qcow2_mark_dirty(bs); 4328 if (ret < 0) { 4329 goto fail; 4330 } 4331 4332 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 4333 4334 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t)); 4335 l1_size2 = (uint64_t)s->l1_size * sizeof(uint64_t); 4336 4337 /* After this call, neither the in-memory nor the on-disk refcount 4338 * information accurately describe the actual references */ 4339 4340 ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset, 4341 l1_clusters * s->cluster_size, 0); 4342 if (ret < 0) { 4343 goto fail_broken_refcounts; 4344 } 4345 memset(s->l1_table, 0, l1_size2); 4346 4347 BLKDBG_EVENT(bs->file, BLKDBG_EMPTY_IMAGE_PREPARE); 4348 4349 /* Overwrite enough clusters at the beginning of the sectors to place 4350 * the refcount table, a refcount block and the L1 table in; this may 4351 * overwrite parts of the existing refcount and L1 table, which is not 4352 * an issue because the dirty flag is set, complete data loss is in fact 4353 * desired and partial data loss is consequently fine as well */ 4354 ret = bdrv_pwrite_zeroes(bs->file, s->cluster_size, 4355 (2 + l1_clusters) * s->cluster_size, 0); 4356 /* This call (even if it failed overall) may have overwritten on-disk 4357 * refcount structures; in that case, the in-memory refcount information 4358 * will probably differ from the on-disk information which makes the BDS 4359 * unusable */ 4360 if (ret < 0) { 4361 goto fail_broken_refcounts; 4362 } 4363 4364 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 4365 BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE); 4366 4367 /* "Create" an empty reftable (one cluster) directly after the image 4368 * header and an empty L1 table three clusters after the image header; 4369 * the cluster between those two will be used as the first refblock */ 4370 l1_ofs_rt_ofs_cls.l1_offset = cpu_to_be64(3 * s->cluster_size); 4371 l1_ofs_rt_ofs_cls.reftable_offset = cpu_to_be64(s->cluster_size); 4372 l1_ofs_rt_ofs_cls.reftable_clusters = cpu_to_be32(1); 4373 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_table_offset), 4374 &l1_ofs_rt_ofs_cls, sizeof(l1_ofs_rt_ofs_cls)); 4375 if (ret < 0) { 4376 goto fail_broken_refcounts; 4377 } 4378 4379 s->l1_table_offset = 3 * s->cluster_size; 4380 4381 new_reftable = g_try_new0(uint64_t, s->cluster_size / sizeof(uint64_t)); 4382 if (!new_reftable) { 4383 ret = -ENOMEM; 4384 goto fail_broken_refcounts; 4385 } 4386 4387 
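    /*
     * Switch the in-memory refcount state over to the minimal layout just
     * created: cluster 0 keeps the image header, cluster 1 holds the
     * single-cluster reftable, cluster 2 will receive the first refblock
     * below, and the L1 table starts at cluster 3.
     */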
s->refcount_table_offset = s->cluster_size; 4388 s->refcount_table_size = s->cluster_size / sizeof(uint64_t); 4389 s->max_refcount_table_index = 0; 4390 4391 g_free(s->refcount_table); 4392 s->refcount_table = new_reftable; 4393 new_reftable = NULL; 4394 4395 /* Now the in-memory refcount information again corresponds to the on-disk 4396 * information (reftable is empty and no refblocks (the refblock cache is 4397 * empty)); however, this means some clusters (e.g. the image header) are 4398 * referenced, but not refcounted, but the normal qcow2 code assumes that 4399 * the in-memory information is always correct */ 4400 4401 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC); 4402 4403 /* Enter the first refblock into the reftable */ 4404 rt_entry = cpu_to_be64(2 * s->cluster_size); 4405 ret = bdrv_pwrite_sync(bs->file, s->cluster_size, 4406 &rt_entry, sizeof(rt_entry)); 4407 if (ret < 0) { 4408 goto fail_broken_refcounts; 4409 } 4410 s->refcount_table[0] = 2 * s->cluster_size; 4411 4412 s->free_cluster_index = 0; 4413 assert(3 + l1_clusters <= s->refcount_block_size); 4414 offset = qcow2_alloc_clusters(bs, 3 * s->cluster_size + l1_size2); 4415 if (offset < 0) { 4416 ret = offset; 4417 goto fail_broken_refcounts; 4418 } else if (offset > 0) { 4419 error_report("First cluster in emptied image is in use"); 4420 abort(); 4421 } 4422 4423 /* Now finally the in-memory information corresponds to the on-disk 4424 * structures and is correct */ 4425 ret = qcow2_mark_clean(bs); 4426 if (ret < 0) { 4427 goto fail; 4428 } 4429 4430 ret = bdrv_truncate(bs->file, (3 + l1_clusters) * s->cluster_size, 4431 PREALLOC_MODE_OFF, &local_err); 4432 if (ret < 0) { 4433 error_report_err(local_err); 4434 goto fail; 4435 } 4436 4437 return 0; 4438 4439 fail_broken_refcounts: 4440 /* The BDS is unusable at this point. If we wanted to make it usable, we 4441 * would have to call qcow2_refcount_close(), qcow2_refcount_init(), 4442 * qcow2_check_refcounts(), qcow2_refcount_close() and qcow2_refcount_init() 4443 * again. However, because the functions which could have caused this error 4444 * path to be taken are used by those functions as well, it's very likely 4445 * that that sequence will fail as well. Therefore, just eject the BDS. */ 4446 bs->drv = NULL; 4447 4448 fail: 4449 g_free(new_reftable); 4450 return ret; 4451 } 4452 4453 static int qcow2_make_empty(BlockDriverState *bs) 4454 { 4455 BDRVQcow2State *s = bs->opaque; 4456 uint64_t offset, end_offset; 4457 int step = QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size); 4458 int l1_clusters, ret = 0; 4459 4460 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t)); 4461 4462 if (s->qcow_version >= 3 && !s->snapshots && !s->nb_bitmaps && 4463 3 + l1_clusters <= s->refcount_block_size && 4464 s->crypt_method_header != QCOW_CRYPT_LUKS && 4465 !has_data_file(bs)) { 4466 /* The following function only works for qcow2 v3 images (it 4467 * requires the dirty flag) and only as long as there are no 4468 * features that reserve extra clusters (such as snapshots, 4469 * LUKS header, or persistent bitmaps), because it completely 4470 * empties the image. Furthermore, the L1 table and three 4471 * additional clusters (image header, refcount table, one 4472 * refcount block) have to fit inside one refcount block. It 4473 * only resets the image file, i.e. does not work with an 4474 * external data file. 
*/ 4475 return make_completely_empty(bs); 4476 } 4477 4478 /* This fallback code simply discards every active cluster; this is slow, 4479 * but works in all cases */ 4480 end_offset = bs->total_sectors * BDRV_SECTOR_SIZE; 4481 for (offset = 0; offset < end_offset; offset += step) { 4482 /* As this function is generally used after committing an external 4483 * snapshot, QCOW2_DISCARD_SNAPSHOT seems appropriate. Also, the 4484 * default action for this kind of discard is to pass the discard, 4485 * which will ideally result in an actually smaller image file, as 4486 * is probably desired. */ 4487 ret = qcow2_cluster_discard(bs, offset, MIN(step, end_offset - offset), 4488 QCOW2_DISCARD_SNAPSHOT, true); 4489 if (ret < 0) { 4490 break; 4491 } 4492 } 4493 4494 return ret; 4495 } 4496 4497 static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs) 4498 { 4499 BDRVQcow2State *s = bs->opaque; 4500 int ret; 4501 4502 qemu_co_mutex_lock(&s->lock); 4503 ret = qcow2_write_caches(bs); 4504 qemu_co_mutex_unlock(&s->lock); 4505 4506 return ret; 4507 } 4508 4509 static ssize_t qcow2_measure_crypto_hdr_init_func(QCryptoBlock *block, 4510 size_t headerlen, void *opaque, Error **errp) 4511 { 4512 size_t *headerlenp = opaque; 4513 4514 /* Stash away the payload size */ 4515 *headerlenp = headerlen; 4516 return 0; 4517 } 4518 4519 static ssize_t qcow2_measure_crypto_hdr_write_func(QCryptoBlock *block, 4520 size_t offset, const uint8_t *buf, size_t buflen, 4521 void *opaque, Error **errp) 4522 { 4523 /* Discard the bytes, we're not actually writing to an image */ 4524 return buflen; 4525 } 4526 4527 /* Determine the number of bytes for the LUKS payload */ 4528 static bool qcow2_measure_luks_headerlen(QemuOpts *opts, size_t *len, 4529 Error **errp) 4530 { 4531 QDict *opts_qdict; 4532 QDict *cryptoopts_qdict; 4533 QCryptoBlockCreateOptions *cryptoopts; 4534 QCryptoBlock *crypto; 4535 4536 /* Extract "encrypt." 
options into a qdict */
    opts_qdict = qemu_opts_to_qdict(opts, NULL);
    qdict_extract_subqdict(opts_qdict, &cryptoopts_qdict, "encrypt.");
    qobject_unref(opts_qdict);

    /* Build QCryptoBlockCreateOptions object from qdict */
    qdict_put_str(cryptoopts_qdict, "format", "luks");
    cryptoopts = block_crypto_create_opts_init(cryptoopts_qdict, errp);
    qobject_unref(cryptoopts_qdict);
    if (!cryptoopts) {
        return false;
    }

    /* Fake LUKS creation in order to determine the payload size */
    crypto = qcrypto_block_create(cryptoopts, "encrypt.",
                                  qcow2_measure_crypto_hdr_init_func,
                                  qcow2_measure_crypto_hdr_write_func,
                                  len, errp);
    qapi_free_QCryptoBlockCreateOptions(cryptoopts);
    if (!crypto) {
        return false;
    }

    qcrypto_block_free(crypto);
    return true;
}

static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs,
                                       Error **errp)
{
    Error *local_err = NULL;
    BlockMeasureInfo *info;
    uint64_t required = 0; /* bytes that contribute to required size */
    uint64_t virtual_size; /* disk size as seen by guest */
    uint64_t refcount_bits;
    uint64_t l2_tables;
    uint64_t luks_payload_size = 0;
    size_t cluster_size;
    int version;
    char *optstr;
    PreallocMode prealloc;
    bool has_backing_file;
    bool has_luks;

    /* Parse image creation options */
    cluster_size = qcow2_opt_get_cluster_size_del(opts, &local_err);
    if (local_err) {
        goto err;
    }

    version = qcow2_opt_get_version_del(opts, &local_err);
    if (local_err) {
        goto err;
    }

    refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err);
    if (local_err) {
        goto err;
    }

    optstr = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
    prealloc = qapi_enum_parse(&PreallocMode_lookup, optstr,
                               PREALLOC_MODE_OFF, &local_err);
    g_free(optstr);
    if (local_err) {
        goto err;
    }

    optstr = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
    has_backing_file = !!optstr;
    g_free(optstr);

    optstr = qemu_opt_get_del(opts, BLOCK_OPT_ENCRYPT_FORMAT);
    has_luks = optstr && strcmp(optstr, "luks") == 0;
    g_free(optstr);

    if (has_luks) {
        size_t headerlen;

        if (!qcow2_measure_luks_headerlen(opts, &headerlen, &local_err)) {
            goto err;
        }

        luks_payload_size = ROUND_UP(headerlen, cluster_size);
    }

    virtual_size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0);
    virtual_size = ROUND_UP(virtual_size, cluster_size);

    /* Check that virtual disk size is valid */
    l2_tables = DIV_ROUND_UP(virtual_size / cluster_size,
                             cluster_size / sizeof(uint64_t));
    if (l2_tables * sizeof(uint64_t) > QCOW_MAX_L1_SIZE) {
        error_setg(&local_err, "The image size is too large "
                   "(try using a larger cluster size)");
        goto err;
    }

    /* Account for input image */
    if (in_bs) {
        int64_t ssize = bdrv_getlength(in_bs);
        if (ssize < 0) {
            error_setg_errno(&local_err, -ssize,
                             "Unable to get image virtual_size");
            goto err;
        }

        virtual_size = ROUND_UP(ssize, cluster_size);

        if (has_backing_file) {
            /* We don't know how much of the backing chain is shared by the
             * input image and the new image file. In the worst case the new
             * image's backing file has nothing in common with the input image.
Be 4649 * conservative and assume all clusters need to be written. 4650 */ 4651 required = virtual_size; 4652 } else { 4653 int64_t offset; 4654 int64_t pnum = 0; 4655 4656 for (offset = 0; offset < ssize; offset += pnum) { 4657 int ret; 4658 4659 ret = bdrv_block_status_above(in_bs, NULL, offset, 4660 ssize - offset, &pnum, NULL, 4661 NULL); 4662 if (ret < 0) { 4663 error_setg_errno(&local_err, -ret, 4664 "Unable to get block status"); 4665 goto err; 4666 } 4667 4668 if (ret & BDRV_BLOCK_ZERO) { 4669 /* Skip zero regions (safe with no backing file) */ 4670 } else if ((ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) == 4671 (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) { 4672 /* Extend pnum to end of cluster for next iteration */ 4673 pnum = ROUND_UP(offset + pnum, cluster_size) - offset; 4674 4675 /* Count clusters we've seen */ 4676 required += offset % cluster_size + pnum; 4677 } 4678 } 4679 } 4680 } 4681 4682 /* Take into account preallocation. Nothing special is needed for 4683 * PREALLOC_MODE_METADATA since metadata is always counted. 4684 */ 4685 if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) { 4686 required = virtual_size; 4687 } 4688 4689 info = g_new(BlockMeasureInfo, 1); 4690 info->fully_allocated = 4691 qcow2_calc_prealloc_size(virtual_size, cluster_size, 4692 ctz32(refcount_bits)) + luks_payload_size; 4693 4694 /* Remove data clusters that are not required. This overestimates the 4695 * required size because metadata needed for the fully allocated file is 4696 * still counted. 4697 */ 4698 info->required = info->fully_allocated - virtual_size + required; 4699 return info; 4700 4701 err: 4702 error_propagate(errp, local_err); 4703 return NULL; 4704 } 4705 4706 static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) 4707 { 4708 BDRVQcow2State *s = bs->opaque; 4709 bdi->unallocated_blocks_are_zero = true; 4710 bdi->cluster_size = s->cluster_size; 4711 bdi->vm_state_offset = qcow2_vm_state_offset(s); 4712 return 0; 4713 } 4714 4715 static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs, 4716 Error **errp) 4717 { 4718 BDRVQcow2State *s = bs->opaque; 4719 ImageInfoSpecific *spec_info; 4720 QCryptoBlockInfo *encrypt_info = NULL; 4721 Error *local_err = NULL; 4722 4723 if (s->crypto != NULL) { 4724 encrypt_info = qcrypto_block_get_info(s->crypto, &local_err); 4725 if (local_err) { 4726 error_propagate(errp, local_err); 4727 return NULL; 4728 } 4729 } 4730 4731 spec_info = g_new(ImageInfoSpecific, 1); 4732 *spec_info = (ImageInfoSpecific){ 4733 .type = IMAGE_INFO_SPECIFIC_KIND_QCOW2, 4734 .u.qcow2.data = g_new0(ImageInfoSpecificQCow2, 1), 4735 }; 4736 if (s->qcow_version == 2) { 4737 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 4738 .compat = g_strdup("0.10"), 4739 .refcount_bits = s->refcount_bits, 4740 }; 4741 } else if (s->qcow_version == 3) { 4742 Qcow2BitmapInfoList *bitmaps; 4743 bitmaps = qcow2_get_bitmap_info_list(bs, &local_err); 4744 if (local_err) { 4745 error_propagate(errp, local_err); 4746 qapi_free_ImageInfoSpecific(spec_info); 4747 return NULL; 4748 } 4749 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 4750 .compat = g_strdup("1.1"), 4751 .lazy_refcounts = s->compatible_features & 4752 QCOW2_COMPAT_LAZY_REFCOUNTS, 4753 .has_lazy_refcounts = true, 4754 .corrupt = s->incompatible_features & 4755 QCOW2_INCOMPAT_CORRUPT, 4756 .has_corrupt = true, 4757 .refcount_bits = s->refcount_bits, 4758 .has_bitmaps = !!bitmaps, 4759 .bitmaps = bitmaps, 4760 .has_data_file = !!s->image_data_file, 4761 .data_file = 
g_strdup(s->image_data_file), 4762 .has_data_file_raw = has_data_file(bs), 4763 .data_file_raw = data_file_is_raw(bs), 4764 }; 4765 } else { 4766 /* if this assertion fails, this probably means a new version was 4767 * added without having it covered here */ 4768 assert(false); 4769 } 4770 4771 if (encrypt_info) { 4772 ImageInfoSpecificQCow2Encryption *qencrypt = 4773 g_new(ImageInfoSpecificQCow2Encryption, 1); 4774 switch (encrypt_info->format) { 4775 case Q_CRYPTO_BLOCK_FORMAT_QCOW: 4776 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_AES; 4777 break; 4778 case Q_CRYPTO_BLOCK_FORMAT_LUKS: 4779 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_LUKS; 4780 qencrypt->u.luks = encrypt_info->u.luks; 4781 break; 4782 default: 4783 abort(); 4784 } 4785 /* Since we did shallow copy above, erase any pointers 4786 * in the original info */ 4787 memset(&encrypt_info->u, 0, sizeof(encrypt_info->u)); 4788 qapi_free_QCryptoBlockInfo(encrypt_info); 4789 4790 spec_info->u.qcow2.data->has_encrypt = true; 4791 spec_info->u.qcow2.data->encrypt = qencrypt; 4792 } 4793 4794 return spec_info; 4795 } 4796 4797 static int qcow2_has_zero_init(BlockDriverState *bs) 4798 { 4799 BDRVQcow2State *s = bs->opaque; 4800 bool preallocated; 4801 4802 if (qemu_in_coroutine()) { 4803 qemu_co_mutex_lock(&s->lock); 4804 } 4805 /* 4806 * Check preallocation status: Preallocated images have all L2 4807 * tables allocated, nonpreallocated images have none. It is 4808 * therefore enough to check the first one. 4809 */ 4810 preallocated = s->l1_size > 0 && s->l1_table[0] != 0; 4811 if (qemu_in_coroutine()) { 4812 qemu_co_mutex_unlock(&s->lock); 4813 } 4814 4815 if (!preallocated) { 4816 return 1; 4817 } else if (bs->encrypted) { 4818 return 0; 4819 } else { 4820 return bdrv_has_zero_init(s->data_file->bs); 4821 } 4822 } 4823 4824 static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, 4825 int64_t pos) 4826 { 4827 BDRVQcow2State *s = bs->opaque; 4828 4829 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE); 4830 return bs->drv->bdrv_co_pwritev_part(bs, qcow2_vm_state_offset(s) + pos, 4831 qiov->size, qiov, 0, 0); 4832 } 4833 4834 static int qcow2_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, 4835 int64_t pos) 4836 { 4837 BDRVQcow2State *s = bs->opaque; 4838 4839 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD); 4840 return bs->drv->bdrv_co_preadv_part(bs, qcow2_vm_state_offset(s) + pos, 4841 qiov->size, qiov, 0, 0); 4842 } 4843 4844 /* 4845 * Downgrades an image's version. To achieve this, any incompatible features 4846 * have to be removed. 
4847 */ 4848 static int qcow2_downgrade(BlockDriverState *bs, int target_version, 4849 BlockDriverAmendStatusCB *status_cb, void *cb_opaque, 4850 Error **errp) 4851 { 4852 BDRVQcow2State *s = bs->opaque; 4853 int current_version = s->qcow_version; 4854 int ret; 4855 4856 /* This is qcow2_downgrade(), not qcow2_upgrade() */ 4857 assert(target_version < current_version); 4858 4859 /* There are no other versions (now) that you can downgrade to */ 4860 assert(target_version == 2); 4861 4862 if (s->refcount_order != 4) { 4863 error_setg(errp, "compat=0.10 requires refcount_bits=16"); 4864 return -ENOTSUP; 4865 } 4866 4867 if (has_data_file(bs)) { 4868 error_setg(errp, "Cannot downgrade an image with a data file"); 4869 return -ENOTSUP; 4870 } 4871 4872 /* clear incompatible features */ 4873 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 4874 ret = qcow2_mark_clean(bs); 4875 if (ret < 0) { 4876 error_setg_errno(errp, -ret, "Failed to make the image clean"); 4877 return ret; 4878 } 4879 } 4880 4881 /* with QCOW2_INCOMPAT_CORRUPT, it is pretty much impossible to get here in 4882 * the first place; if that happens nonetheless, returning -ENOTSUP is the 4883 * best thing to do anyway */ 4884 4885 if (s->incompatible_features) { 4886 error_setg(errp, "Cannot downgrade an image with incompatible features " 4887 "%#" PRIx64 " set", s->incompatible_features); 4888 return -ENOTSUP; 4889 } 4890 4891 /* since we can ignore compatible features, we can set them to 0 as well */ 4892 s->compatible_features = 0; 4893 /* if lazy refcounts have been used, they have already been fixed through 4894 * clearing the dirty flag */ 4895 4896 /* clearing autoclear features is trivial */ 4897 s->autoclear_features = 0; 4898 4899 ret = qcow2_expand_zero_clusters(bs, status_cb, cb_opaque); 4900 if (ret < 0) { 4901 error_setg_errno(errp, -ret, "Failed to turn zero into data clusters"); 4902 return ret; 4903 } 4904 4905 s->qcow_version = target_version; 4906 ret = qcow2_update_header(bs); 4907 if (ret < 0) { 4908 s->qcow_version = current_version; 4909 error_setg_errno(errp, -ret, "Failed to update the image header"); 4910 return ret; 4911 } 4912 return 0; 4913 } 4914 4915 typedef enum Qcow2AmendOperation { 4916 /* This is the value Qcow2AmendHelperCBInfo::last_operation will be 4917 * statically initialized to so that the helper CB can discern the first 4918 * invocation from an operation change */ 4919 QCOW2_NO_OPERATION = 0, 4920 4921 QCOW2_CHANGING_REFCOUNT_ORDER, 4922 QCOW2_DOWNGRADING, 4923 } Qcow2AmendOperation; 4924 4925 typedef struct Qcow2AmendHelperCBInfo { 4926 /* The code coordinating the amend operations should only modify 4927 * these four fields; the rest will be managed by the CB */ 4928 BlockDriverAmendStatusCB *original_status_cb; 4929 void *original_cb_opaque; 4930 4931 Qcow2AmendOperation current_operation; 4932 4933 /* Total number of operations to perform (only set once) */ 4934 int total_operations; 4935 4936 /* The following fields are managed by the CB */ 4937 4938 /* Number of operations completed */ 4939 int operations_completed; 4940 4941 /* Cumulative offset of all completed operations */ 4942 int64_t offset_completed; 4943 4944 Qcow2AmendOperation last_operation; 4945 int64_t last_work_size; 4946 } Qcow2AmendHelperCBInfo; 4947 4948 static void qcow2_amend_helper_cb(BlockDriverState *bs, 4949 int64_t operation_offset, 4950 int64_t operation_work_size, void *opaque) 4951 { 4952 Qcow2AmendHelperCBInfo *info = opaque; 4953 int64_t current_work_size; 4954 int64_t projected_work_size; 4955 
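    /*
     * Fold the progress of the individual amend sub-operations into one
     * report: finished operations contribute their full work size, the
     * current one its reported offset, and the size of operations that have
     * not started yet is extrapolated from the average seen so far.
     */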
4956 if (info->current_operation != info->last_operation) { 4957 if (info->last_operation != QCOW2_NO_OPERATION) { 4958 info->offset_completed += info->last_work_size; 4959 info->operations_completed++; 4960 } 4961 4962 info->last_operation = info->current_operation; 4963 } 4964 4965 assert(info->total_operations > 0); 4966 assert(info->operations_completed < info->total_operations); 4967 4968 info->last_work_size = operation_work_size; 4969 4970 current_work_size = info->offset_completed + operation_work_size; 4971 4972 /* current_work_size is the total work size for (operations_completed + 1) 4973 * operations (which includes this one), so multiply it by the number of 4974 * operations not covered and divide it by the number of operations 4975 * covered to get a projection for the operations not covered */ 4976 projected_work_size = current_work_size * (info->total_operations - 4977 info->operations_completed - 1) 4978 / (info->operations_completed + 1); 4979 4980 info->original_status_cb(bs, info->offset_completed + operation_offset, 4981 current_work_size + projected_work_size, 4982 info->original_cb_opaque); 4983 } 4984 4985 static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts, 4986 BlockDriverAmendStatusCB *status_cb, 4987 void *cb_opaque, 4988 Error **errp) 4989 { 4990 BDRVQcow2State *s = bs->opaque; 4991 int old_version = s->qcow_version, new_version = old_version; 4992 uint64_t new_size = 0; 4993 const char *backing_file = NULL, *backing_format = NULL, *data_file = NULL; 4994 bool lazy_refcounts = s->use_lazy_refcounts; 4995 bool data_file_raw = data_file_is_raw(bs); 4996 const char *compat = NULL; 4997 uint64_t cluster_size = s->cluster_size; 4998 bool encrypt; 4999 int encformat; 5000 int refcount_bits = s->refcount_bits; 5001 int ret; 5002 QemuOptDesc *desc = opts->list->desc; 5003 Qcow2AmendHelperCBInfo helper_cb_info; 5004 5005 while (desc && desc->name) { 5006 if (!qemu_opt_find(opts, desc->name)) { 5007 /* only change explicitly defined options */ 5008 desc++; 5009 continue; 5010 } 5011 5012 if (!strcmp(desc->name, BLOCK_OPT_COMPAT_LEVEL)) { 5013 compat = qemu_opt_get(opts, BLOCK_OPT_COMPAT_LEVEL); 5014 if (!compat) { 5015 /* preserve default */ 5016 } else if (!strcmp(compat, "0.10") || !strcmp(compat, "v2")) { 5017 new_version = 2; 5018 } else if (!strcmp(compat, "1.1") || !strcmp(compat, "v3")) { 5019 new_version = 3; 5020 } else { 5021 error_setg(errp, "Unknown compatibility level %s", compat); 5022 return -EINVAL; 5023 } 5024 } else if (!strcmp(desc->name, BLOCK_OPT_PREALLOC)) { 5025 error_setg(errp, "Cannot change preallocation mode"); 5026 return -ENOTSUP; 5027 } else if (!strcmp(desc->name, BLOCK_OPT_SIZE)) { 5028 new_size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0); 5029 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FILE)) { 5030 backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE); 5031 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FMT)) { 5032 backing_format = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT); 5033 } else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT)) { 5034 encrypt = qemu_opt_get_bool(opts, BLOCK_OPT_ENCRYPT, 5035 !!s->crypto); 5036 5037 if (encrypt != !!s->crypto) { 5038 error_setg(errp, 5039 "Changing the encryption flag is not supported"); 5040 return -ENOTSUP; 5041 } 5042 } else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT_FORMAT)) { 5043 encformat = qcow2_crypt_method_from_format( 5044 qemu_opt_get(opts, BLOCK_OPT_ENCRYPT_FORMAT)); 5045 5046 if (encformat != s->crypt_method_header) { 5047 error_setg(errp, 5048 "Changing the 
encryption format is not supported"); 5049 return -ENOTSUP; 5050 } 5051 } else if (g_str_has_prefix(desc->name, "encrypt.")) { 5052 error_setg(errp, 5053 "Changing the encryption parameters is not supported"); 5054 return -ENOTSUP; 5055 } else if (!strcmp(desc->name, BLOCK_OPT_CLUSTER_SIZE)) { 5056 cluster_size = qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE, 5057 cluster_size); 5058 if (cluster_size != s->cluster_size) { 5059 error_setg(errp, "Changing the cluster size is not supported"); 5060 return -ENOTSUP; 5061 } 5062 } else if (!strcmp(desc->name, BLOCK_OPT_LAZY_REFCOUNTS)) { 5063 lazy_refcounts = qemu_opt_get_bool(opts, BLOCK_OPT_LAZY_REFCOUNTS, 5064 lazy_refcounts); 5065 } else if (!strcmp(desc->name, BLOCK_OPT_REFCOUNT_BITS)) { 5066 refcount_bits = qemu_opt_get_number(opts, BLOCK_OPT_REFCOUNT_BITS, 5067 refcount_bits); 5068 5069 if (refcount_bits <= 0 || refcount_bits > 64 || 5070 !is_power_of_2(refcount_bits)) 5071 { 5072 error_setg(errp, "Refcount width must be a power of two and " 5073 "may not exceed 64 bits"); 5074 return -EINVAL; 5075 } 5076 } else if (!strcmp(desc->name, BLOCK_OPT_DATA_FILE)) { 5077 data_file = qemu_opt_get(opts, BLOCK_OPT_DATA_FILE); 5078 if (data_file && !has_data_file(bs)) { 5079 error_setg(errp, "data-file can only be set for images that " 5080 "use an external data file"); 5081 return -EINVAL; 5082 } 5083 } else if (!strcmp(desc->name, BLOCK_OPT_DATA_FILE_RAW)) { 5084 data_file_raw = qemu_opt_get_bool(opts, BLOCK_OPT_DATA_FILE_RAW, 5085 data_file_raw); 5086 if (data_file_raw && !data_file_is_raw(bs)) { 5087 error_setg(errp, "data-file-raw cannot be set on existing " 5088 "images"); 5089 return -EINVAL; 5090 } 5091 } else { 5092 /* if this point is reached, this probably means a new option was 5093 * added without having it covered here */ 5094 abort(); 5095 } 5096 5097 desc++; 5098 } 5099 5100 helper_cb_info = (Qcow2AmendHelperCBInfo){ 5101 .original_status_cb = status_cb, 5102 .original_cb_opaque = cb_opaque, 5103 .total_operations = (new_version < old_version) 5104 + (s->refcount_bits != refcount_bits) 5105 }; 5106 5107 /* Upgrade first (some features may require compat=1.1) */ 5108 if (new_version > old_version) { 5109 s->qcow_version = new_version; 5110 ret = qcow2_update_header(bs); 5111 if (ret < 0) { 5112 s->qcow_version = old_version; 5113 error_setg_errno(errp, -ret, "Failed to update the image header"); 5114 return ret; 5115 } 5116 } 5117 5118 if (s->refcount_bits != refcount_bits) { 5119 int refcount_order = ctz32(refcount_bits); 5120 5121 if (new_version < 3 && refcount_bits != 16) { 5122 error_setg(errp, "Refcount widths other than 16 bits require " 5123 "compatibility level 1.1 or above (use compat=1.1 or " 5124 "greater)"); 5125 return -EINVAL; 5126 } 5127 5128 helper_cb_info.current_operation = QCOW2_CHANGING_REFCOUNT_ORDER; 5129 ret = qcow2_change_refcount_order(bs, refcount_order, 5130 &qcow2_amend_helper_cb, 5131 &helper_cb_info, errp); 5132 if (ret < 0) { 5133 return ret; 5134 } 5135 } 5136 5137 /* data-file-raw blocks backing files, so clear it first if requested */ 5138 if (data_file_raw) { 5139 s->autoclear_features |= QCOW2_AUTOCLEAR_DATA_FILE_RAW; 5140 } else { 5141 s->autoclear_features &= ~QCOW2_AUTOCLEAR_DATA_FILE_RAW; 5142 } 5143 5144 if (data_file) { 5145 g_free(s->image_data_file); 5146 s->image_data_file = *data_file ? 
g_strdup(data_file) : NULL; 5147 } 5148 5149 ret = qcow2_update_header(bs); 5150 if (ret < 0) { 5151 error_setg_errno(errp, -ret, "Failed to update the image header"); 5152 return ret; 5153 } 5154 5155 if (backing_file || backing_format) { 5156 ret = qcow2_change_backing_file(bs, 5157 backing_file ?: s->image_backing_file, 5158 backing_format ?: s->image_backing_format); 5159 if (ret < 0) { 5160 error_setg_errno(errp, -ret, "Failed to change the backing file"); 5161 return ret; 5162 } 5163 } 5164 5165 if (s->use_lazy_refcounts != lazy_refcounts) { 5166 if (lazy_refcounts) { 5167 if (new_version < 3) { 5168 error_setg(errp, "Lazy refcounts only supported with " 5169 "compatibility level 1.1 and above (use compat=1.1 " 5170 "or greater)"); 5171 return -EINVAL; 5172 } 5173 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS; 5174 ret = qcow2_update_header(bs); 5175 if (ret < 0) { 5176 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS; 5177 error_setg_errno(errp, -ret, "Failed to update the image header"); 5178 return ret; 5179 } 5180 s->use_lazy_refcounts = true; 5181 } else { 5182 /* make image clean first */ 5183 ret = qcow2_mark_clean(bs); 5184 if (ret < 0) { 5185 error_setg_errno(errp, -ret, "Failed to make the image clean"); 5186 return ret; 5187 } 5188 /* now disallow lazy refcounts */ 5189 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS; 5190 ret = qcow2_update_header(bs); 5191 if (ret < 0) { 5192 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS; 5193 error_setg_errno(errp, -ret, "Failed to update the image header"); 5194 return ret; 5195 } 5196 s->use_lazy_refcounts = false; 5197 } 5198 } 5199 5200 if (new_size) { 5201 BlockBackend *blk = blk_new(bdrv_get_aio_context(bs), 5202 BLK_PERM_RESIZE, BLK_PERM_ALL); 5203 ret = blk_insert_bs(blk, bs, errp); 5204 if (ret < 0) { 5205 blk_unref(blk); 5206 return ret; 5207 } 5208 5209 ret = blk_truncate(blk, new_size, PREALLOC_MODE_OFF, errp); 5210 blk_unref(blk); 5211 if (ret < 0) { 5212 return ret; 5213 } 5214 } 5215 5216 /* Downgrade last (so unsupported features can be removed before) */ 5217 if (new_version < old_version) { 5218 helper_cb_info.current_operation = QCOW2_DOWNGRADING; 5219 ret = qcow2_downgrade(bs, new_version, &qcow2_amend_helper_cb, 5220 &helper_cb_info, errp); 5221 if (ret < 0) { 5222 return ret; 5223 } 5224 } 5225 5226 return 0; 5227 } 5228 5229 /* 5230 * If offset or size are negative, respectively, they will not be included in 5231 * the BLOCK_IMAGE_CORRUPTED event emitted. 5232 * fatal will be ignored for read-only BDS; corruptions found there will always 5233 * be considered non-fatal. 5234 */ 5235 void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset, 5236 int64_t size, const char *message_format, ...) 
5237 { 5238 BDRVQcow2State *s = bs->opaque; 5239 const char *node_name; 5240 char *message; 5241 va_list ap; 5242 5243 fatal = fatal && bdrv_is_writable(bs); 5244 5245 if (s->signaled_corruption && 5246 (!fatal || (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT))) 5247 { 5248 return; 5249 } 5250 5251 va_start(ap, message_format); 5252 message = g_strdup_vprintf(message_format, ap); 5253 va_end(ap); 5254 5255 if (fatal) { 5256 fprintf(stderr, "qcow2: Marking image as corrupt: %s; further " 5257 "corruption events will be suppressed\n", message); 5258 } else { 5259 fprintf(stderr, "qcow2: Image is corrupt: %s; further non-fatal " 5260 "corruption events will be suppressed\n", message); 5261 } 5262 5263 node_name = bdrv_get_node_name(bs); 5264 qapi_event_send_block_image_corrupted(bdrv_get_device_name(bs), 5265 *node_name != '\0', node_name, 5266 message, offset >= 0, offset, 5267 size >= 0, size, 5268 fatal); 5269 g_free(message); 5270 5271 if (fatal) { 5272 qcow2_mark_corrupt(bs); 5273 bs->drv = NULL; /* make BDS unusable */ 5274 } 5275 5276 s->signaled_corruption = true; 5277 } 5278 5279 static QemuOptsList qcow2_create_opts = { 5280 .name = "qcow2-create-opts", 5281 .head = QTAILQ_HEAD_INITIALIZER(qcow2_create_opts.head), 5282 .desc = { 5283 { 5284 .name = BLOCK_OPT_SIZE, 5285 .type = QEMU_OPT_SIZE, 5286 .help = "Virtual disk size" 5287 }, 5288 { 5289 .name = BLOCK_OPT_COMPAT_LEVEL, 5290 .type = QEMU_OPT_STRING, 5291 .help = "Compatibility level (v2 [0.10] or v3 [1.1])" 5292 }, 5293 { 5294 .name = BLOCK_OPT_BACKING_FILE, 5295 .type = QEMU_OPT_STRING, 5296 .help = "File name of a base image" 5297 }, 5298 { 5299 .name = BLOCK_OPT_BACKING_FMT, 5300 .type = QEMU_OPT_STRING, 5301 .help = "Image format of the base image" 5302 }, 5303 { 5304 .name = BLOCK_OPT_DATA_FILE, 5305 .type = QEMU_OPT_STRING, 5306 .help = "File name of an external data file" 5307 }, 5308 { 5309 .name = BLOCK_OPT_DATA_FILE_RAW, 5310 .type = QEMU_OPT_BOOL, 5311 .help = "The external data file must stay valid as a raw image" 5312 }, 5313 { 5314 .name = BLOCK_OPT_ENCRYPT, 5315 .type = QEMU_OPT_BOOL, 5316 .help = "Encrypt the image with format 'aes'. 
(Deprecated " 5317 "in favor of " BLOCK_OPT_ENCRYPT_FORMAT "=aes)", 5318 }, 5319 { 5320 .name = BLOCK_OPT_ENCRYPT_FORMAT, 5321 .type = QEMU_OPT_STRING, 5322 .help = "Encrypt the image, format choices: 'aes', 'luks'", 5323 }, 5324 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.", 5325 "ID of secret providing qcow AES key or LUKS passphrase"), 5326 BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_ALG("encrypt."), 5327 BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_MODE("encrypt."), 5328 BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_ALG("encrypt."), 5329 BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_HASH_ALG("encrypt."), 5330 BLOCK_CRYPTO_OPT_DEF_LUKS_HASH_ALG("encrypt."), 5331 BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."), 5332 { 5333 .name = BLOCK_OPT_CLUSTER_SIZE, 5334 .type = QEMU_OPT_SIZE, 5335 .help = "qcow2 cluster size", 5336 .def_value_str = stringify(DEFAULT_CLUSTER_SIZE) 5337 }, 5338 { 5339 .name = BLOCK_OPT_PREALLOC, 5340 .type = QEMU_OPT_STRING, 5341 .help = "Preallocation mode (allowed values: off, metadata, " 5342 "falloc, full)" 5343 }, 5344 { 5345 .name = BLOCK_OPT_LAZY_REFCOUNTS, 5346 .type = QEMU_OPT_BOOL, 5347 .help = "Postpone refcount updates", 5348 .def_value_str = "off" 5349 }, 5350 { 5351 .name = BLOCK_OPT_REFCOUNT_BITS, 5352 .type = QEMU_OPT_NUMBER, 5353 .help = "Width of a reference count entry in bits", 5354 .def_value_str = "16" 5355 }, 5356 { /* end of list */ } 5357 } 5358 }; 5359 5360 static const char *const qcow2_strong_runtime_opts[] = { 5361 "encrypt." BLOCK_CRYPTO_OPT_QCOW_KEY_SECRET, 5362 5363 NULL 5364 }; 5365 5366 BlockDriver bdrv_qcow2 = { 5367 .format_name = "qcow2", 5368 .instance_size = sizeof(BDRVQcow2State), 5369 .bdrv_probe = qcow2_probe, 5370 .bdrv_open = qcow2_open, 5371 .bdrv_close = qcow2_close, 5372 .bdrv_reopen_prepare = qcow2_reopen_prepare, 5373 .bdrv_reopen_commit = qcow2_reopen_commit, 5374 .bdrv_reopen_abort = qcow2_reopen_abort, 5375 .bdrv_join_options = qcow2_join_options, 5376 .bdrv_child_perm = bdrv_format_default_perms, 5377 .bdrv_co_create_opts = qcow2_co_create_opts, 5378 .bdrv_co_create = qcow2_co_create, 5379 .bdrv_has_zero_init = qcow2_has_zero_init, 5380 .bdrv_has_zero_init_truncate = bdrv_has_zero_init_1, 5381 .bdrv_co_block_status = qcow2_co_block_status, 5382 5383 .bdrv_co_preadv_part = qcow2_co_preadv_part, 5384 .bdrv_co_pwritev_part = qcow2_co_pwritev_part, 5385 .bdrv_co_flush_to_os = qcow2_co_flush_to_os, 5386 5387 .bdrv_co_pwrite_zeroes = qcow2_co_pwrite_zeroes, 5388 .bdrv_co_pdiscard = qcow2_co_pdiscard, 5389 .bdrv_co_copy_range_from = qcow2_co_copy_range_from, 5390 .bdrv_co_copy_range_to = qcow2_co_copy_range_to, 5391 .bdrv_co_truncate = qcow2_co_truncate, 5392 .bdrv_co_pwritev_compressed_part = qcow2_co_pwritev_compressed_part, 5393 .bdrv_make_empty = qcow2_make_empty, 5394 5395 .bdrv_snapshot_create = qcow2_snapshot_create, 5396 .bdrv_snapshot_goto = qcow2_snapshot_goto, 5397 .bdrv_snapshot_delete = qcow2_snapshot_delete, 5398 .bdrv_snapshot_list = qcow2_snapshot_list, 5399 .bdrv_snapshot_load_tmp = qcow2_snapshot_load_tmp, 5400 .bdrv_measure = qcow2_measure, 5401 .bdrv_get_info = qcow2_get_info, 5402 .bdrv_get_specific_info = qcow2_get_specific_info, 5403 5404 .bdrv_save_vmstate = qcow2_save_vmstate, 5405 .bdrv_load_vmstate = qcow2_load_vmstate, 5406 5407 .supports_backing = true, 5408 .bdrv_change_backing_file = qcow2_change_backing_file, 5409 5410 .bdrv_refresh_limits = qcow2_refresh_limits, 5411 .bdrv_co_invalidate_cache = qcow2_co_invalidate_cache, 5412 .bdrv_inactivate = qcow2_inactivate, 5413 5414 .create_opts = &qcow2_create_opts, 5415 .strong_runtime_opts = 
qcow2_strong_runtime_opts, 5416 .mutable_opts = mutable_opts, 5417 .bdrv_co_check = qcow2_co_check, 5418 .bdrv_amend_options = qcow2_amend_options, 5419 5420 .bdrv_detach_aio_context = qcow2_detach_aio_context, 5421 .bdrv_attach_aio_context = qcow2_attach_aio_context, 5422 5423 .bdrv_co_can_store_new_dirty_bitmap = qcow2_co_can_store_new_dirty_bitmap, 5424 .bdrv_co_remove_persistent_dirty_bitmap = 5425 qcow2_co_remove_persistent_dirty_bitmap, 5426 }; 5427 5428 static void bdrv_qcow2_init(void) 5429 { 5430 bdrv_register(&bdrv_qcow2); 5431 } 5432 5433 block_init(bdrv_qcow2_init); 5434