1 /* 2 * Block driver for the QCOW version 2 format 3 * 4 * Copyright (c) 2004-2006 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25 #include "qemu/osdep.h" 26 27 #include "block/qdict.h" 28 #include "sysemu/block-backend.h" 29 #include "qemu/main-loop.h" 30 #include "qemu/module.h" 31 #include "qcow2.h" 32 #include "qemu/error-report.h" 33 #include "qapi/error.h" 34 #include "qapi/qapi-events-block-core.h" 35 #include "qapi/qmp/qdict.h" 36 #include "qapi/qmp/qstring.h" 37 #include "trace.h" 38 #include "qemu/option_int.h" 39 #include "qemu/cutils.h" 40 #include "qemu/bswap.h" 41 #include "qapi/qobject-input-visitor.h" 42 #include "qapi/qapi-visit-block-core.h" 43 #include "crypto.h" 44 #include "block/aio_task.h" 45 46 /* 47 Differences with QCOW: 48 49 - Support for multiple incremental snapshots. 50 - Memory management by reference counts. 51 - Clusters which have a reference count of one have the bit 52 QCOW_OFLAG_COPIED to optimize write performance. 53 - Size of compressed clusters is stored in sectors to reduce bit usage 54 in the cluster offsets. 55 - Support for storing additional data (such as the VM state) in the 56 snapshots. 57 - If a backing store is used, the cluster size is not constrained 58 (could be backported to QCOW). 59 - L2 tables have always a size of one cluster. 60 */ 61 62 63 typedef struct { 64 uint32_t magic; 65 uint32_t len; 66 } QEMU_PACKED QCowExtension; 67 68 #define QCOW2_EXT_MAGIC_END 0 69 #define QCOW2_EXT_MAGIC_BACKING_FORMAT 0xE2792ACA 70 #define QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857 71 #define QCOW2_EXT_MAGIC_CRYPTO_HEADER 0x0537be77 72 #define QCOW2_EXT_MAGIC_BITMAPS 0x23852875 73 #define QCOW2_EXT_MAGIC_DATA_FILE 0x44415441 74 75 static int coroutine_fn 76 qcow2_co_preadv_compressed(BlockDriverState *bs, 77 uint64_t file_cluster_offset, 78 uint64_t offset, 79 uint64_t bytes, 80 QEMUIOVector *qiov, 81 size_t qiov_offset); 82 83 static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename) 84 { 85 const QCowHeader *cow_header = (const void *)buf; 86 87 if (buf_size >= sizeof(QCowHeader) && 88 be32_to_cpu(cow_header->magic) == QCOW_MAGIC && 89 be32_to_cpu(cow_header->version) >= 2) 90 return 100; 91 else 92 return 0; 93 } 94 95 96 static ssize_t qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset, 97 uint8_t *buf, size_t buflen, 98 void *opaque, Error **errp) 99 { 100 BlockDriverState *bs = opaque; 101 BDRVQcow2State *s = bs->opaque; 102 ssize_t ret; 103 104 if ((offset + buflen) > s->crypto_header.length) { 105 error_setg(errp, "Request for data outside of extension header"); 106 return -1; 107 } 108 109 ret = bdrv_pread(bs->file, 110 s->crypto_header.offset + offset, buf, buflen); 111 if (ret < 0) { 112 error_setg_errno(errp, -ret, "Could not read encryption header"); 113 return -1; 114 } 115 return ret; 116 } 117 118 119 static ssize_t qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen, 120 void *opaque, Error **errp) 121 { 122 BlockDriverState *bs = opaque; 123 BDRVQcow2State *s = bs->opaque; 124 int64_t ret; 125 int64_t clusterlen; 126 127 ret = qcow2_alloc_clusters(bs, headerlen); 128 if (ret < 0) { 129 error_setg_errno(errp, -ret, 130 "Cannot allocate cluster for LUKS header size %zu", 131 headerlen); 132 return -1; 133 } 134 135 s->crypto_header.length = headerlen; 136 s->crypto_header.offset = ret; 137 138 /* 139 * Zero fill all space in cluster so it has predictable 140 * content, as we may not initialize some regions of the 141 * header (eg only 1 out of 8 key slots will be initialized) 142 */ 143 clusterlen = size_to_clusters(s, headerlen) * s->cluster_size; 144 assert(qcow2_pre_write_overlap_check(bs, 0, ret, clusterlen, false) == 0); 145 ret = bdrv_pwrite_zeroes(bs->file, 146 ret, 147 clusterlen, 0); 148 if (ret < 0) { 149 error_setg_errno(errp, -ret, "Could not zero fill encryption header"); 150 return -1; 151 } 152 153 return ret; 154 } 155 156 157 static ssize_t qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset, 158 const uint8_t *buf, size_t buflen, 159 void *opaque, Error **errp) 160 { 161 BlockDriverState *bs = opaque; 162 BDRVQcow2State *s = bs->opaque; 163 ssize_t ret; 164 165 if ((offset + buflen) > s->crypto_header.length) { 166 error_setg(errp, "Request for data outside of extension header"); 167 return -1; 168 } 169 170 ret = bdrv_pwrite(bs->file, 171 s->crypto_header.offset + offset, buf, buflen); 172 if (ret < 0) { 173 error_setg_errno(errp, -ret, "Could not read encryption header"); 174 return -1; 175 } 176 return ret; 177 } 178 179 180 /* 181 * read qcow2 extension and fill bs 182 * start reading from start_offset 183 * finish reading upon magic of value 0 or when end_offset reached 184 * unknown magic is skipped (future extension this version knows nothing about) 185 * return 0 upon success, non-0 otherwise 186 */ 187 static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, 188 uint64_t end_offset, void **p_feature_table, 189 int flags, bool *need_update_header, 190 Error **errp) 191 { 192 BDRVQcow2State *s = bs->opaque; 193 QCowExtension ext; 194 uint64_t offset; 195 int ret; 196 Qcow2BitmapHeaderExt bitmaps_ext; 197 198 if (need_update_header != NULL) { 199 *need_update_header = false; 200 } 201 202 #ifdef DEBUG_EXT 203 printf("qcow2_read_extensions: start=%ld end=%ld\n", start_offset, end_offset); 204 #endif 205 offset = start_offset; 206 while (offset < end_offset) { 207 208 #ifdef DEBUG_EXT 209 /* Sanity check */ 210 if (offset > s->cluster_size) 211 printf("qcow2_read_extension: suspicious offset %lu\n", offset); 212 213 printf("attempting to read extended header in offset %lu\n", offset); 214 #endif 215 216 ret = bdrv_pread(bs->file, offset, &ext, sizeof(ext)); 217 if (ret < 0) { 218 error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: " 219 "pread fail from offset %" PRIu64, offset); 220 return 1; 221 } 222 ext.magic = be32_to_cpu(ext.magic); 223 ext.len = be32_to_cpu(ext.len); 224 offset += sizeof(ext); 225 #ifdef DEBUG_EXT 226 printf("ext.magic = 0x%x\n", ext.magic); 227 #endif 228 if (offset > end_offset || ext.len > end_offset - offset) { 229 error_setg(errp, "Header extension too large"); 230 return -EINVAL; 231 } 232 233 switch (ext.magic) { 234 case QCOW2_EXT_MAGIC_END: 235 return 0; 236 237 case QCOW2_EXT_MAGIC_BACKING_FORMAT: 238 if (ext.len >= sizeof(bs->backing_format)) { 239 error_setg(errp, "ERROR: ext_backing_format: len=%" PRIu32 240 " too large (>=%zu)", ext.len, 241 sizeof(bs->backing_format)); 242 return 2; 243 } 244 ret = bdrv_pread(bs->file, offset, bs->backing_format, ext.len); 245 if (ret < 0) { 246 error_setg_errno(errp, -ret, "ERROR: ext_backing_format: " 247 "Could not read format name"); 248 return 3; 249 } 250 bs->backing_format[ext.len] = '\0'; 251 s->image_backing_format = g_strdup(bs->backing_format); 252 #ifdef DEBUG_EXT 253 printf("Qcow2: Got format extension %s\n", bs->backing_format); 254 #endif 255 break; 256 257 case QCOW2_EXT_MAGIC_FEATURE_TABLE: 258 if (p_feature_table != NULL) { 259 void* feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature)); 260 ret = bdrv_pread(bs->file, offset , feature_table, ext.len); 261 if (ret < 0) { 262 error_setg_errno(errp, -ret, "ERROR: ext_feature_table: " 263 "Could not read table"); 264 return ret; 265 } 266 267 *p_feature_table = feature_table; 268 } 269 break; 270 271 case QCOW2_EXT_MAGIC_CRYPTO_HEADER: { 272 unsigned int cflags = 0; 273 if (s->crypt_method_header != QCOW_CRYPT_LUKS) { 274 error_setg(errp, "CRYPTO header extension only " 275 "expected with LUKS encryption method"); 276 return -EINVAL; 277 } 278 if (ext.len != sizeof(Qcow2CryptoHeaderExtension)) { 279 error_setg(errp, "CRYPTO header extension size %u, " 280 "but expected size %zu", ext.len, 281 sizeof(Qcow2CryptoHeaderExtension)); 282 return -EINVAL; 283 } 284 285 ret = bdrv_pread(bs->file, offset, &s->crypto_header, ext.len); 286 if (ret < 0) { 287 error_setg_errno(errp, -ret, 288 "Unable to read CRYPTO header extension"); 289 return ret; 290 } 291 s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset); 292 s->crypto_header.length = be64_to_cpu(s->crypto_header.length); 293 294 if ((s->crypto_header.offset % s->cluster_size) != 0) { 295 error_setg(errp, "Encryption header offset '%" PRIu64 "' is " 296 "not a multiple of cluster size '%u'", 297 s->crypto_header.offset, s->cluster_size); 298 return -EINVAL; 299 } 300 301 if (flags & BDRV_O_NO_IO) { 302 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO; 303 } 304 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.", 305 qcow2_crypto_hdr_read_func, 306 bs, cflags, QCOW2_MAX_THREADS, errp); 307 if (!s->crypto) { 308 return -EINVAL; 309 } 310 } break; 311 312 case QCOW2_EXT_MAGIC_BITMAPS: 313 if (ext.len != sizeof(bitmaps_ext)) { 314 error_setg_errno(errp, -ret, "bitmaps_ext: " 315 "Invalid extension length"); 316 return -EINVAL; 317 } 318 319 if (!(s->autoclear_features & QCOW2_AUTOCLEAR_BITMAPS)) { 320 if (s->qcow_version < 3) { 321 /* Let's be a bit more specific */ 322 warn_report("This qcow2 v2 image contains bitmaps, but " 323 "they may have been modified by a program " 324 "without persistent bitmap support; so now " 325 "they must all be considered inconsistent"); 326 } else { 327 warn_report("a program lacking bitmap support " 328 "modified this file, so all bitmaps are now " 329 "considered inconsistent"); 330 } 331 error_printf("Some clusters may be leaked, " 332 "run 'qemu-img check -r' on the image " 333 "file to fix."); 334 if (need_update_header != NULL) { 335 /* Updating is needed to drop invalid bitmap extension. */ 336 *need_update_header = true; 337 } 338 break; 339 } 340 341 ret = bdrv_pread(bs->file, offset, &bitmaps_ext, ext.len); 342 if (ret < 0) { 343 error_setg_errno(errp, -ret, "bitmaps_ext: " 344 "Could not read ext header"); 345 return ret; 346 } 347 348 if (bitmaps_ext.reserved32 != 0) { 349 error_setg_errno(errp, -ret, "bitmaps_ext: " 350 "Reserved field is not zero"); 351 return -EINVAL; 352 } 353 354 bitmaps_ext.nb_bitmaps = be32_to_cpu(bitmaps_ext.nb_bitmaps); 355 bitmaps_ext.bitmap_directory_size = 356 be64_to_cpu(bitmaps_ext.bitmap_directory_size); 357 bitmaps_ext.bitmap_directory_offset = 358 be64_to_cpu(bitmaps_ext.bitmap_directory_offset); 359 360 if (bitmaps_ext.nb_bitmaps > QCOW2_MAX_BITMAPS) { 361 error_setg(errp, 362 "bitmaps_ext: Image has %" PRIu32 " bitmaps, " 363 "exceeding the QEMU supported maximum of %d", 364 bitmaps_ext.nb_bitmaps, QCOW2_MAX_BITMAPS); 365 return -EINVAL; 366 } 367 368 if (bitmaps_ext.nb_bitmaps == 0) { 369 error_setg(errp, "found bitmaps extension with zero bitmaps"); 370 return -EINVAL; 371 } 372 373 if (offset_into_cluster(s, bitmaps_ext.bitmap_directory_offset)) { 374 error_setg(errp, "bitmaps_ext: " 375 "invalid bitmap directory offset"); 376 return -EINVAL; 377 } 378 379 if (bitmaps_ext.bitmap_directory_size > 380 QCOW2_MAX_BITMAP_DIRECTORY_SIZE) { 381 error_setg(errp, "bitmaps_ext: " 382 "bitmap directory size (%" PRIu64 ") exceeds " 383 "the maximum supported size (%d)", 384 bitmaps_ext.bitmap_directory_size, 385 QCOW2_MAX_BITMAP_DIRECTORY_SIZE); 386 return -EINVAL; 387 } 388 389 s->nb_bitmaps = bitmaps_ext.nb_bitmaps; 390 s->bitmap_directory_offset = 391 bitmaps_ext.bitmap_directory_offset; 392 s->bitmap_directory_size = 393 bitmaps_ext.bitmap_directory_size; 394 395 #ifdef DEBUG_EXT 396 printf("Qcow2: Got bitmaps extension: " 397 "offset=%" PRIu64 " nb_bitmaps=%" PRIu32 "\n", 398 s->bitmap_directory_offset, s->nb_bitmaps); 399 #endif 400 break; 401 402 case QCOW2_EXT_MAGIC_DATA_FILE: 403 { 404 s->image_data_file = g_malloc0(ext.len + 1); 405 ret = bdrv_pread(bs->file, offset, s->image_data_file, ext.len); 406 if (ret < 0) { 407 error_setg_errno(errp, -ret, 408 "ERROR: Could not read data file name"); 409 return ret; 410 } 411 #ifdef DEBUG_EXT 412 printf("Qcow2: Got external data file %s\n", s->image_data_file); 413 #endif 414 break; 415 } 416 417 default: 418 /* unknown magic - save it in case we need to rewrite the header */ 419 /* If you add a new feature, make sure to also update the fast 420 * path of qcow2_make_empty() to deal with it. */ 421 { 422 Qcow2UnknownHeaderExtension *uext; 423 424 uext = g_malloc0(sizeof(*uext) + ext.len); 425 uext->magic = ext.magic; 426 uext->len = ext.len; 427 QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next); 428 429 ret = bdrv_pread(bs->file, offset , uext->data, uext->len); 430 if (ret < 0) { 431 error_setg_errno(errp, -ret, "ERROR: unknown extension: " 432 "Could not read data"); 433 return ret; 434 } 435 } 436 break; 437 } 438 439 offset += ((ext.len + 7) & ~7); 440 } 441 442 return 0; 443 } 444 445 static void cleanup_unknown_header_ext(BlockDriverState *bs) 446 { 447 BDRVQcow2State *s = bs->opaque; 448 Qcow2UnknownHeaderExtension *uext, *next; 449 450 QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) { 451 QLIST_REMOVE(uext, next); 452 g_free(uext); 453 } 454 } 455 456 static void report_unsupported_feature(Error **errp, Qcow2Feature *table, 457 uint64_t mask) 458 { 459 g_autoptr(GString) features = g_string_sized_new(60); 460 461 while (table && table->name[0] != '\0') { 462 if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) { 463 if (mask & (1ULL << table->bit)) { 464 if (features->len > 0) { 465 g_string_append(features, ", "); 466 } 467 g_string_append_printf(features, "%.46s", table->name); 468 mask &= ~(1ULL << table->bit); 469 } 470 } 471 table++; 472 } 473 474 if (mask) { 475 if (features->len > 0) { 476 g_string_append(features, ", "); 477 } 478 g_string_append_printf(features, 479 "Unknown incompatible feature: %" PRIx64, mask); 480 } 481 482 error_setg(errp, "Unsupported qcow2 feature(s): %s", features->str); 483 } 484 485 /* 486 * Sets the dirty bit and flushes afterwards if necessary. 487 * 488 * The incompatible_features bit is only set if the image file header was 489 * updated successfully. Therefore it is not required to check the return 490 * value of this function. 491 */ 492 int qcow2_mark_dirty(BlockDriverState *bs) 493 { 494 BDRVQcow2State *s = bs->opaque; 495 uint64_t val; 496 int ret; 497 498 assert(s->qcow_version >= 3); 499 500 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 501 return 0; /* already dirty */ 502 } 503 504 val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY); 505 ret = bdrv_pwrite(bs->file, offsetof(QCowHeader, incompatible_features), 506 &val, sizeof(val)); 507 if (ret < 0) { 508 return ret; 509 } 510 ret = bdrv_flush(bs->file->bs); 511 if (ret < 0) { 512 return ret; 513 } 514 515 /* Only treat image as dirty if the header was updated successfully */ 516 s->incompatible_features |= QCOW2_INCOMPAT_DIRTY; 517 return 0; 518 } 519 520 /* 521 * Clears the dirty bit and flushes before if necessary. Only call this 522 * function when there are no pending requests, it does not guard against 523 * concurrent requests dirtying the image. 524 */ 525 static int qcow2_mark_clean(BlockDriverState *bs) 526 { 527 BDRVQcow2State *s = bs->opaque; 528 529 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 530 int ret; 531 532 s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY; 533 534 ret = qcow2_flush_caches(bs); 535 if (ret < 0) { 536 return ret; 537 } 538 539 return qcow2_update_header(bs); 540 } 541 return 0; 542 } 543 544 /* 545 * Marks the image as corrupt. 546 */ 547 int qcow2_mark_corrupt(BlockDriverState *bs) 548 { 549 BDRVQcow2State *s = bs->opaque; 550 551 s->incompatible_features |= QCOW2_INCOMPAT_CORRUPT; 552 return qcow2_update_header(bs); 553 } 554 555 /* 556 * Marks the image as consistent, i.e., unsets the corrupt bit, and flushes 557 * before if necessary. 558 */ 559 int qcow2_mark_consistent(BlockDriverState *bs) 560 { 561 BDRVQcow2State *s = bs->opaque; 562 563 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) { 564 int ret = qcow2_flush_caches(bs); 565 if (ret < 0) { 566 return ret; 567 } 568 569 s->incompatible_features &= ~QCOW2_INCOMPAT_CORRUPT; 570 return qcow2_update_header(bs); 571 } 572 return 0; 573 } 574 575 static void qcow2_add_check_result(BdrvCheckResult *out, 576 const BdrvCheckResult *src, 577 bool set_allocation_info) 578 { 579 out->corruptions += src->corruptions; 580 out->leaks += src->leaks; 581 out->check_errors += src->check_errors; 582 out->corruptions_fixed += src->corruptions_fixed; 583 out->leaks_fixed += src->leaks_fixed; 584 585 if (set_allocation_info) { 586 out->image_end_offset = src->image_end_offset; 587 out->bfi = src->bfi; 588 } 589 } 590 591 static int coroutine_fn qcow2_co_check_locked(BlockDriverState *bs, 592 BdrvCheckResult *result, 593 BdrvCheckMode fix) 594 { 595 BdrvCheckResult snapshot_res = {}; 596 BdrvCheckResult refcount_res = {}; 597 int ret; 598 599 memset(result, 0, sizeof(*result)); 600 601 ret = qcow2_check_read_snapshot_table(bs, &snapshot_res, fix); 602 if (ret < 0) { 603 qcow2_add_check_result(result, &snapshot_res, false); 604 return ret; 605 } 606 607 ret = qcow2_check_refcounts(bs, &refcount_res, fix); 608 qcow2_add_check_result(result, &refcount_res, true); 609 if (ret < 0) { 610 qcow2_add_check_result(result, &snapshot_res, false); 611 return ret; 612 } 613 614 ret = qcow2_check_fix_snapshot_table(bs, &snapshot_res, fix); 615 qcow2_add_check_result(result, &snapshot_res, false); 616 if (ret < 0) { 617 return ret; 618 } 619 620 if (fix && result->check_errors == 0 && result->corruptions == 0) { 621 ret = qcow2_mark_clean(bs); 622 if (ret < 0) { 623 return ret; 624 } 625 return qcow2_mark_consistent(bs); 626 } 627 return ret; 628 } 629 630 static int coroutine_fn qcow2_co_check(BlockDriverState *bs, 631 BdrvCheckResult *result, 632 BdrvCheckMode fix) 633 { 634 BDRVQcow2State *s = bs->opaque; 635 int ret; 636 637 qemu_co_mutex_lock(&s->lock); 638 ret = qcow2_co_check_locked(bs, result, fix); 639 qemu_co_mutex_unlock(&s->lock); 640 return ret; 641 } 642 643 int qcow2_validate_table(BlockDriverState *bs, uint64_t offset, 644 uint64_t entries, size_t entry_len, 645 int64_t max_size_bytes, const char *table_name, 646 Error **errp) 647 { 648 BDRVQcow2State *s = bs->opaque; 649 650 if (entries > max_size_bytes / entry_len) { 651 error_setg(errp, "%s too large", table_name); 652 return -EFBIG; 653 } 654 655 /* Use signed INT64_MAX as the maximum even for uint64_t header fields, 656 * because values will be passed to qemu functions taking int64_t. */ 657 if ((INT64_MAX - entries * entry_len < offset) || 658 (offset_into_cluster(s, offset) != 0)) { 659 error_setg(errp, "%s offset invalid", table_name); 660 return -EINVAL; 661 } 662 663 return 0; 664 } 665 666 static const char *const mutable_opts[] = { 667 QCOW2_OPT_LAZY_REFCOUNTS, 668 QCOW2_OPT_DISCARD_REQUEST, 669 QCOW2_OPT_DISCARD_SNAPSHOT, 670 QCOW2_OPT_DISCARD_OTHER, 671 QCOW2_OPT_OVERLAP, 672 QCOW2_OPT_OVERLAP_TEMPLATE, 673 QCOW2_OPT_OVERLAP_MAIN_HEADER, 674 QCOW2_OPT_OVERLAP_ACTIVE_L1, 675 QCOW2_OPT_OVERLAP_ACTIVE_L2, 676 QCOW2_OPT_OVERLAP_REFCOUNT_TABLE, 677 QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK, 678 QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE, 679 QCOW2_OPT_OVERLAP_INACTIVE_L1, 680 QCOW2_OPT_OVERLAP_INACTIVE_L2, 681 QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY, 682 QCOW2_OPT_CACHE_SIZE, 683 QCOW2_OPT_L2_CACHE_SIZE, 684 QCOW2_OPT_L2_CACHE_ENTRY_SIZE, 685 QCOW2_OPT_REFCOUNT_CACHE_SIZE, 686 QCOW2_OPT_CACHE_CLEAN_INTERVAL, 687 NULL 688 }; 689 690 static QemuOptsList qcow2_runtime_opts = { 691 .name = "qcow2", 692 .head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head), 693 .desc = { 694 { 695 .name = QCOW2_OPT_LAZY_REFCOUNTS, 696 .type = QEMU_OPT_BOOL, 697 .help = "Postpone refcount updates", 698 }, 699 { 700 .name = QCOW2_OPT_DISCARD_REQUEST, 701 .type = QEMU_OPT_BOOL, 702 .help = "Pass guest discard requests to the layer below", 703 }, 704 { 705 .name = QCOW2_OPT_DISCARD_SNAPSHOT, 706 .type = QEMU_OPT_BOOL, 707 .help = "Generate discard requests when snapshot related space " 708 "is freed", 709 }, 710 { 711 .name = QCOW2_OPT_DISCARD_OTHER, 712 .type = QEMU_OPT_BOOL, 713 .help = "Generate discard requests when other clusters are freed", 714 }, 715 { 716 .name = QCOW2_OPT_OVERLAP, 717 .type = QEMU_OPT_STRING, 718 .help = "Selects which overlap checks to perform from a range of " 719 "templates (none, constant, cached, all)", 720 }, 721 { 722 .name = QCOW2_OPT_OVERLAP_TEMPLATE, 723 .type = QEMU_OPT_STRING, 724 .help = "Selects which overlap checks to perform from a range of " 725 "templates (none, constant, cached, all)", 726 }, 727 { 728 .name = QCOW2_OPT_OVERLAP_MAIN_HEADER, 729 .type = QEMU_OPT_BOOL, 730 .help = "Check for unintended writes into the main qcow2 header", 731 }, 732 { 733 .name = QCOW2_OPT_OVERLAP_ACTIVE_L1, 734 .type = QEMU_OPT_BOOL, 735 .help = "Check for unintended writes into the active L1 table", 736 }, 737 { 738 .name = QCOW2_OPT_OVERLAP_ACTIVE_L2, 739 .type = QEMU_OPT_BOOL, 740 .help = "Check for unintended writes into an active L2 table", 741 }, 742 { 743 .name = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE, 744 .type = QEMU_OPT_BOOL, 745 .help = "Check for unintended writes into the refcount table", 746 }, 747 { 748 .name = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK, 749 .type = QEMU_OPT_BOOL, 750 .help = "Check for unintended writes into a refcount block", 751 }, 752 { 753 .name = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE, 754 .type = QEMU_OPT_BOOL, 755 .help = "Check for unintended writes into the snapshot table", 756 }, 757 { 758 .name = QCOW2_OPT_OVERLAP_INACTIVE_L1, 759 .type = QEMU_OPT_BOOL, 760 .help = "Check for unintended writes into an inactive L1 table", 761 }, 762 { 763 .name = QCOW2_OPT_OVERLAP_INACTIVE_L2, 764 .type = QEMU_OPT_BOOL, 765 .help = "Check for unintended writes into an inactive L2 table", 766 }, 767 { 768 .name = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY, 769 .type = QEMU_OPT_BOOL, 770 .help = "Check for unintended writes into the bitmap directory", 771 }, 772 { 773 .name = QCOW2_OPT_CACHE_SIZE, 774 .type = QEMU_OPT_SIZE, 775 .help = "Maximum combined metadata (L2 tables and refcount blocks) " 776 "cache size", 777 }, 778 { 779 .name = QCOW2_OPT_L2_CACHE_SIZE, 780 .type = QEMU_OPT_SIZE, 781 .help = "Maximum L2 table cache size", 782 }, 783 { 784 .name = QCOW2_OPT_L2_CACHE_ENTRY_SIZE, 785 .type = QEMU_OPT_SIZE, 786 .help = "Size of each entry in the L2 cache", 787 }, 788 { 789 .name = QCOW2_OPT_REFCOUNT_CACHE_SIZE, 790 .type = QEMU_OPT_SIZE, 791 .help = "Maximum refcount block cache size", 792 }, 793 { 794 .name = QCOW2_OPT_CACHE_CLEAN_INTERVAL, 795 .type = QEMU_OPT_NUMBER, 796 .help = "Clean unused cache entries after this time (in seconds)", 797 }, 798 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.", 799 "ID of secret providing qcow2 AES key or LUKS passphrase"), 800 { /* end of list */ } 801 }, 802 }; 803 804 static const char *overlap_bool_option_names[QCOW2_OL_MAX_BITNR] = { 805 [QCOW2_OL_MAIN_HEADER_BITNR] = QCOW2_OPT_OVERLAP_MAIN_HEADER, 806 [QCOW2_OL_ACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L1, 807 [QCOW2_OL_ACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_ACTIVE_L2, 808 [QCOW2_OL_REFCOUNT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE, 809 [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK, 810 [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE, 811 [QCOW2_OL_INACTIVE_L1_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L1, 812 [QCOW2_OL_INACTIVE_L2_BITNR] = QCOW2_OPT_OVERLAP_INACTIVE_L2, 813 [QCOW2_OL_BITMAP_DIRECTORY_BITNR] = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY, 814 }; 815 816 static void cache_clean_timer_cb(void *opaque) 817 { 818 BlockDriverState *bs = opaque; 819 BDRVQcow2State *s = bs->opaque; 820 qcow2_cache_clean_unused(s->l2_table_cache); 821 qcow2_cache_clean_unused(s->refcount_block_cache); 822 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 823 (int64_t) s->cache_clean_interval * 1000); 824 } 825 826 static void cache_clean_timer_init(BlockDriverState *bs, AioContext *context) 827 { 828 BDRVQcow2State *s = bs->opaque; 829 if (s->cache_clean_interval > 0) { 830 s->cache_clean_timer = aio_timer_new(context, QEMU_CLOCK_VIRTUAL, 831 SCALE_MS, cache_clean_timer_cb, 832 bs); 833 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 834 (int64_t) s->cache_clean_interval * 1000); 835 } 836 } 837 838 static void cache_clean_timer_del(BlockDriverState *bs) 839 { 840 BDRVQcow2State *s = bs->opaque; 841 if (s->cache_clean_timer) { 842 timer_del(s->cache_clean_timer); 843 timer_free(s->cache_clean_timer); 844 s->cache_clean_timer = NULL; 845 } 846 } 847 848 static void qcow2_detach_aio_context(BlockDriverState *bs) 849 { 850 cache_clean_timer_del(bs); 851 } 852 853 static void qcow2_attach_aio_context(BlockDriverState *bs, 854 AioContext *new_context) 855 { 856 cache_clean_timer_init(bs, new_context); 857 } 858 859 static void read_cache_sizes(BlockDriverState *bs, QemuOpts *opts, 860 uint64_t *l2_cache_size, 861 uint64_t *l2_cache_entry_size, 862 uint64_t *refcount_cache_size, Error **errp) 863 { 864 BDRVQcow2State *s = bs->opaque; 865 uint64_t combined_cache_size, l2_cache_max_setting; 866 bool l2_cache_size_set, refcount_cache_size_set, combined_cache_size_set; 867 bool l2_cache_entry_size_set; 868 int min_refcount_cache = MIN_REFCOUNT_CACHE_SIZE * s->cluster_size; 869 uint64_t virtual_disk_size = bs->total_sectors * BDRV_SECTOR_SIZE; 870 uint64_t max_l2_entries = DIV_ROUND_UP(virtual_disk_size, s->cluster_size); 871 /* An L2 table is always one cluster in size so the max cache size 872 * should be a multiple of the cluster size. */ 873 uint64_t max_l2_cache = ROUND_UP(max_l2_entries * sizeof(uint64_t), 874 s->cluster_size); 875 876 combined_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_CACHE_SIZE); 877 l2_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_SIZE); 878 refcount_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 879 l2_cache_entry_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE); 880 881 combined_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_CACHE_SIZE, 0); 882 l2_cache_max_setting = qemu_opt_get_size(opts, QCOW2_OPT_L2_CACHE_SIZE, 883 DEFAULT_L2_CACHE_MAX_SIZE); 884 *refcount_cache_size = qemu_opt_get_size(opts, 885 QCOW2_OPT_REFCOUNT_CACHE_SIZE, 0); 886 887 *l2_cache_entry_size = qemu_opt_get_size( 888 opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE, s->cluster_size); 889 890 *l2_cache_size = MIN(max_l2_cache, l2_cache_max_setting); 891 892 if (combined_cache_size_set) { 893 if (l2_cache_size_set && refcount_cache_size_set) { 894 error_setg(errp, QCOW2_OPT_CACHE_SIZE ", " QCOW2_OPT_L2_CACHE_SIZE 895 " and " QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not be set " 896 "at the same time"); 897 return; 898 } else if (l2_cache_size_set && 899 (l2_cache_max_setting > combined_cache_size)) { 900 error_setg(errp, QCOW2_OPT_L2_CACHE_SIZE " may not exceed " 901 QCOW2_OPT_CACHE_SIZE); 902 return; 903 } else if (*refcount_cache_size > combined_cache_size) { 904 error_setg(errp, QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not exceed " 905 QCOW2_OPT_CACHE_SIZE); 906 return; 907 } 908 909 if (l2_cache_size_set) { 910 *refcount_cache_size = combined_cache_size - *l2_cache_size; 911 } else if (refcount_cache_size_set) { 912 *l2_cache_size = combined_cache_size - *refcount_cache_size; 913 } else { 914 /* Assign as much memory as possible to the L2 cache, and 915 * use the remainder for the refcount cache */ 916 if (combined_cache_size >= max_l2_cache + min_refcount_cache) { 917 *l2_cache_size = max_l2_cache; 918 *refcount_cache_size = combined_cache_size - *l2_cache_size; 919 } else { 920 *refcount_cache_size = 921 MIN(combined_cache_size, min_refcount_cache); 922 *l2_cache_size = combined_cache_size - *refcount_cache_size; 923 } 924 } 925 } 926 927 /* 928 * If the L2 cache is not enough to cover the whole disk then 929 * default to 4KB entries. Smaller entries reduce the cost of 930 * loads and evictions and increase I/O performance. 931 */ 932 if (*l2_cache_size < max_l2_cache && !l2_cache_entry_size_set) { 933 *l2_cache_entry_size = MIN(s->cluster_size, 4096); 934 } 935 936 /* l2_cache_size and refcount_cache_size are ensured to have at least 937 * their minimum values in qcow2_update_options_prepare() */ 938 939 if (*l2_cache_entry_size < (1 << MIN_CLUSTER_BITS) || 940 *l2_cache_entry_size > s->cluster_size || 941 !is_power_of_2(*l2_cache_entry_size)) { 942 error_setg(errp, "L2 cache entry size must be a power of two " 943 "between %d and the cluster size (%d)", 944 1 << MIN_CLUSTER_BITS, s->cluster_size); 945 return; 946 } 947 } 948 949 typedef struct Qcow2ReopenState { 950 Qcow2Cache *l2_table_cache; 951 Qcow2Cache *refcount_block_cache; 952 int l2_slice_size; /* Number of entries in a slice of the L2 table */ 953 bool use_lazy_refcounts; 954 int overlap_check; 955 bool discard_passthrough[QCOW2_DISCARD_MAX]; 956 uint64_t cache_clean_interval; 957 QCryptoBlockOpenOptions *crypto_opts; /* Disk encryption runtime options */ 958 } Qcow2ReopenState; 959 960 static int qcow2_update_options_prepare(BlockDriverState *bs, 961 Qcow2ReopenState *r, 962 QDict *options, int flags, 963 Error **errp) 964 { 965 BDRVQcow2State *s = bs->opaque; 966 QemuOpts *opts = NULL; 967 const char *opt_overlap_check, *opt_overlap_check_template; 968 int overlap_check_template = 0; 969 uint64_t l2_cache_size, l2_cache_entry_size, refcount_cache_size; 970 int i; 971 const char *encryptfmt; 972 QDict *encryptopts = NULL; 973 Error *local_err = NULL; 974 int ret; 975 976 qdict_extract_subqdict(options, &encryptopts, "encrypt."); 977 encryptfmt = qdict_get_try_str(encryptopts, "format"); 978 979 opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort); 980 qemu_opts_absorb_qdict(opts, options, &local_err); 981 if (local_err) { 982 error_propagate(errp, local_err); 983 ret = -EINVAL; 984 goto fail; 985 } 986 987 /* get L2 table/refcount block cache size from command line options */ 988 read_cache_sizes(bs, opts, &l2_cache_size, &l2_cache_entry_size, 989 &refcount_cache_size, &local_err); 990 if (local_err) { 991 error_propagate(errp, local_err); 992 ret = -EINVAL; 993 goto fail; 994 } 995 996 l2_cache_size /= l2_cache_entry_size; 997 if (l2_cache_size < MIN_L2_CACHE_SIZE) { 998 l2_cache_size = MIN_L2_CACHE_SIZE; 999 } 1000 if (l2_cache_size > INT_MAX) { 1001 error_setg(errp, "L2 cache size too big"); 1002 ret = -EINVAL; 1003 goto fail; 1004 } 1005 1006 refcount_cache_size /= s->cluster_size; 1007 if (refcount_cache_size < MIN_REFCOUNT_CACHE_SIZE) { 1008 refcount_cache_size = MIN_REFCOUNT_CACHE_SIZE; 1009 } 1010 if (refcount_cache_size > INT_MAX) { 1011 error_setg(errp, "Refcount cache size too big"); 1012 ret = -EINVAL; 1013 goto fail; 1014 } 1015 1016 /* alloc new L2 table/refcount block cache, flush old one */ 1017 if (s->l2_table_cache) { 1018 ret = qcow2_cache_flush(bs, s->l2_table_cache); 1019 if (ret) { 1020 error_setg_errno(errp, -ret, "Failed to flush the L2 table cache"); 1021 goto fail; 1022 } 1023 } 1024 1025 if (s->refcount_block_cache) { 1026 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 1027 if (ret) { 1028 error_setg_errno(errp, -ret, 1029 "Failed to flush the refcount block cache"); 1030 goto fail; 1031 } 1032 } 1033 1034 r->l2_slice_size = l2_cache_entry_size / sizeof(uint64_t); 1035 r->l2_table_cache = qcow2_cache_create(bs, l2_cache_size, 1036 l2_cache_entry_size); 1037 r->refcount_block_cache = qcow2_cache_create(bs, refcount_cache_size, 1038 s->cluster_size); 1039 if (r->l2_table_cache == NULL || r->refcount_block_cache == NULL) { 1040 error_setg(errp, "Could not allocate metadata caches"); 1041 ret = -ENOMEM; 1042 goto fail; 1043 } 1044 1045 /* New interval for cache cleanup timer */ 1046 r->cache_clean_interval = 1047 qemu_opt_get_number(opts, QCOW2_OPT_CACHE_CLEAN_INTERVAL, 1048 DEFAULT_CACHE_CLEAN_INTERVAL); 1049 #ifndef CONFIG_LINUX 1050 if (r->cache_clean_interval != 0) { 1051 error_setg(errp, QCOW2_OPT_CACHE_CLEAN_INTERVAL 1052 " not supported on this host"); 1053 ret = -EINVAL; 1054 goto fail; 1055 } 1056 #endif 1057 if (r->cache_clean_interval > UINT_MAX) { 1058 error_setg(errp, "Cache clean interval too big"); 1059 ret = -EINVAL; 1060 goto fail; 1061 } 1062 1063 /* lazy-refcounts; flush if going from enabled to disabled */ 1064 r->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS, 1065 (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS)); 1066 if (r->use_lazy_refcounts && s->qcow_version < 3) { 1067 error_setg(errp, "Lazy refcounts require a qcow2 image with at least " 1068 "qemu 1.1 compatibility level"); 1069 ret = -EINVAL; 1070 goto fail; 1071 } 1072 1073 if (s->use_lazy_refcounts && !r->use_lazy_refcounts) { 1074 ret = qcow2_mark_clean(bs); 1075 if (ret < 0) { 1076 error_setg_errno(errp, -ret, "Failed to disable lazy refcounts"); 1077 goto fail; 1078 } 1079 } 1080 1081 /* Overlap check options */ 1082 opt_overlap_check = qemu_opt_get(opts, QCOW2_OPT_OVERLAP); 1083 opt_overlap_check_template = qemu_opt_get(opts, QCOW2_OPT_OVERLAP_TEMPLATE); 1084 if (opt_overlap_check_template && opt_overlap_check && 1085 strcmp(opt_overlap_check_template, opt_overlap_check)) 1086 { 1087 error_setg(errp, "Conflicting values for qcow2 options '" 1088 QCOW2_OPT_OVERLAP "' ('%s') and '" QCOW2_OPT_OVERLAP_TEMPLATE 1089 "' ('%s')", opt_overlap_check, opt_overlap_check_template); 1090 ret = -EINVAL; 1091 goto fail; 1092 } 1093 if (!opt_overlap_check) { 1094 opt_overlap_check = opt_overlap_check_template ?: "cached"; 1095 } 1096 1097 if (!strcmp(opt_overlap_check, "none")) { 1098 overlap_check_template = 0; 1099 } else if (!strcmp(opt_overlap_check, "constant")) { 1100 overlap_check_template = QCOW2_OL_CONSTANT; 1101 } else if (!strcmp(opt_overlap_check, "cached")) { 1102 overlap_check_template = QCOW2_OL_CACHED; 1103 } else if (!strcmp(opt_overlap_check, "all")) { 1104 overlap_check_template = QCOW2_OL_ALL; 1105 } else { 1106 error_setg(errp, "Unsupported value '%s' for qcow2 option " 1107 "'overlap-check'. Allowed are any of the following: " 1108 "none, constant, cached, all", opt_overlap_check); 1109 ret = -EINVAL; 1110 goto fail; 1111 } 1112 1113 r->overlap_check = 0; 1114 for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) { 1115 /* overlap-check defines a template bitmask, but every flag may be 1116 * overwritten through the associated boolean option */ 1117 r->overlap_check |= 1118 qemu_opt_get_bool(opts, overlap_bool_option_names[i], 1119 overlap_check_template & (1 << i)) << i; 1120 } 1121 1122 r->discard_passthrough[QCOW2_DISCARD_NEVER] = false; 1123 r->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true; 1124 r->discard_passthrough[QCOW2_DISCARD_REQUEST] = 1125 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST, 1126 flags & BDRV_O_UNMAP); 1127 r->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] = 1128 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true); 1129 r->discard_passthrough[QCOW2_DISCARD_OTHER] = 1130 qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false); 1131 1132 switch (s->crypt_method_header) { 1133 case QCOW_CRYPT_NONE: 1134 if (encryptfmt) { 1135 error_setg(errp, "No encryption in image header, but options " 1136 "specified format '%s'", encryptfmt); 1137 ret = -EINVAL; 1138 goto fail; 1139 } 1140 break; 1141 1142 case QCOW_CRYPT_AES: 1143 if (encryptfmt && !g_str_equal(encryptfmt, "aes")) { 1144 error_setg(errp, 1145 "Header reported 'aes' encryption format but " 1146 "options specify '%s'", encryptfmt); 1147 ret = -EINVAL; 1148 goto fail; 1149 } 1150 qdict_put_str(encryptopts, "format", "qcow"); 1151 r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp); 1152 break; 1153 1154 case QCOW_CRYPT_LUKS: 1155 if (encryptfmt && !g_str_equal(encryptfmt, "luks")) { 1156 error_setg(errp, 1157 "Header reported 'luks' encryption format but " 1158 "options specify '%s'", encryptfmt); 1159 ret = -EINVAL; 1160 goto fail; 1161 } 1162 qdict_put_str(encryptopts, "format", "luks"); 1163 r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp); 1164 break; 1165 1166 default: 1167 error_setg(errp, "Unsupported encryption method %d", 1168 s->crypt_method_header); 1169 break; 1170 } 1171 if (s->crypt_method_header != QCOW_CRYPT_NONE && !r->crypto_opts) { 1172 ret = -EINVAL; 1173 goto fail; 1174 } 1175 1176 ret = 0; 1177 fail: 1178 qobject_unref(encryptopts); 1179 qemu_opts_del(opts); 1180 opts = NULL; 1181 return ret; 1182 } 1183 1184 static void qcow2_update_options_commit(BlockDriverState *bs, 1185 Qcow2ReopenState *r) 1186 { 1187 BDRVQcow2State *s = bs->opaque; 1188 int i; 1189 1190 if (s->l2_table_cache) { 1191 qcow2_cache_destroy(s->l2_table_cache); 1192 } 1193 if (s->refcount_block_cache) { 1194 qcow2_cache_destroy(s->refcount_block_cache); 1195 } 1196 s->l2_table_cache = r->l2_table_cache; 1197 s->refcount_block_cache = r->refcount_block_cache; 1198 s->l2_slice_size = r->l2_slice_size; 1199 1200 s->overlap_check = r->overlap_check; 1201 s->use_lazy_refcounts = r->use_lazy_refcounts; 1202 1203 for (i = 0; i < QCOW2_DISCARD_MAX; i++) { 1204 s->discard_passthrough[i] = r->discard_passthrough[i]; 1205 } 1206 1207 if (s->cache_clean_interval != r->cache_clean_interval) { 1208 cache_clean_timer_del(bs); 1209 s->cache_clean_interval = r->cache_clean_interval; 1210 cache_clean_timer_init(bs, bdrv_get_aio_context(bs)); 1211 } 1212 1213 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 1214 s->crypto_opts = r->crypto_opts; 1215 } 1216 1217 static void qcow2_update_options_abort(BlockDriverState *bs, 1218 Qcow2ReopenState *r) 1219 { 1220 if (r->l2_table_cache) { 1221 qcow2_cache_destroy(r->l2_table_cache); 1222 } 1223 if (r->refcount_block_cache) { 1224 qcow2_cache_destroy(r->refcount_block_cache); 1225 } 1226 qapi_free_QCryptoBlockOpenOptions(r->crypto_opts); 1227 } 1228 1229 static int qcow2_update_options(BlockDriverState *bs, QDict *options, 1230 int flags, Error **errp) 1231 { 1232 Qcow2ReopenState r = {}; 1233 int ret; 1234 1235 ret = qcow2_update_options_prepare(bs, &r, options, flags, errp); 1236 if (ret >= 0) { 1237 qcow2_update_options_commit(bs, &r); 1238 } else { 1239 qcow2_update_options_abort(bs, &r); 1240 } 1241 1242 return ret; 1243 } 1244 1245 /* Called with s->lock held. */ 1246 static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options, 1247 int flags, Error **errp) 1248 { 1249 BDRVQcow2State *s = bs->opaque; 1250 unsigned int len, i; 1251 int ret = 0; 1252 QCowHeader header; 1253 Error *local_err = NULL; 1254 uint64_t ext_end; 1255 uint64_t l1_vm_state_index; 1256 bool update_header = false; 1257 1258 ret = bdrv_pread(bs->file, 0, &header, sizeof(header)); 1259 if (ret < 0) { 1260 error_setg_errno(errp, -ret, "Could not read qcow2 header"); 1261 goto fail; 1262 } 1263 header.magic = be32_to_cpu(header.magic); 1264 header.version = be32_to_cpu(header.version); 1265 header.backing_file_offset = be64_to_cpu(header.backing_file_offset); 1266 header.backing_file_size = be32_to_cpu(header.backing_file_size); 1267 header.size = be64_to_cpu(header.size); 1268 header.cluster_bits = be32_to_cpu(header.cluster_bits); 1269 header.crypt_method = be32_to_cpu(header.crypt_method); 1270 header.l1_table_offset = be64_to_cpu(header.l1_table_offset); 1271 header.l1_size = be32_to_cpu(header.l1_size); 1272 header.refcount_table_offset = be64_to_cpu(header.refcount_table_offset); 1273 header.refcount_table_clusters = 1274 be32_to_cpu(header.refcount_table_clusters); 1275 header.snapshots_offset = be64_to_cpu(header.snapshots_offset); 1276 header.nb_snapshots = be32_to_cpu(header.nb_snapshots); 1277 1278 if (header.magic != QCOW_MAGIC) { 1279 error_setg(errp, "Image is not in qcow2 format"); 1280 ret = -EINVAL; 1281 goto fail; 1282 } 1283 if (header.version < 2 || header.version > 3) { 1284 error_setg(errp, "Unsupported qcow2 version %" PRIu32, header.version); 1285 ret = -ENOTSUP; 1286 goto fail; 1287 } 1288 1289 s->qcow_version = header.version; 1290 1291 /* Initialise cluster size */ 1292 if (header.cluster_bits < MIN_CLUSTER_BITS || 1293 header.cluster_bits > MAX_CLUSTER_BITS) { 1294 error_setg(errp, "Unsupported cluster size: 2^%" PRIu32, 1295 header.cluster_bits); 1296 ret = -EINVAL; 1297 goto fail; 1298 } 1299 1300 s->cluster_bits = header.cluster_bits; 1301 s->cluster_size = 1 << s->cluster_bits; 1302 1303 /* Initialise version 3 header fields */ 1304 if (header.version == 2) { 1305 header.incompatible_features = 0; 1306 header.compatible_features = 0; 1307 header.autoclear_features = 0; 1308 header.refcount_order = 4; 1309 header.header_length = 72; 1310 } else { 1311 header.incompatible_features = 1312 be64_to_cpu(header.incompatible_features); 1313 header.compatible_features = be64_to_cpu(header.compatible_features); 1314 header.autoclear_features = be64_to_cpu(header.autoclear_features); 1315 header.refcount_order = be32_to_cpu(header.refcount_order); 1316 header.header_length = be32_to_cpu(header.header_length); 1317 1318 if (header.header_length < 104) { 1319 error_setg(errp, "qcow2 header too short"); 1320 ret = -EINVAL; 1321 goto fail; 1322 } 1323 } 1324 1325 if (header.header_length > s->cluster_size) { 1326 error_setg(errp, "qcow2 header exceeds cluster size"); 1327 ret = -EINVAL; 1328 goto fail; 1329 } 1330 1331 if (header.header_length > sizeof(header)) { 1332 s->unknown_header_fields_size = header.header_length - sizeof(header); 1333 s->unknown_header_fields = g_malloc(s->unknown_header_fields_size); 1334 ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields, 1335 s->unknown_header_fields_size); 1336 if (ret < 0) { 1337 error_setg_errno(errp, -ret, "Could not read unknown qcow2 header " 1338 "fields"); 1339 goto fail; 1340 } 1341 } 1342 1343 if (header.backing_file_offset > s->cluster_size) { 1344 error_setg(errp, "Invalid backing file offset"); 1345 ret = -EINVAL; 1346 goto fail; 1347 } 1348 1349 if (header.backing_file_offset) { 1350 ext_end = header.backing_file_offset; 1351 } else { 1352 ext_end = 1 << header.cluster_bits; 1353 } 1354 1355 /* Handle feature bits */ 1356 s->incompatible_features = header.incompatible_features; 1357 s->compatible_features = header.compatible_features; 1358 s->autoclear_features = header.autoclear_features; 1359 1360 if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) { 1361 void *feature_table = NULL; 1362 qcow2_read_extensions(bs, header.header_length, ext_end, 1363 &feature_table, flags, NULL, NULL); 1364 report_unsupported_feature(errp, feature_table, 1365 s->incompatible_features & 1366 ~QCOW2_INCOMPAT_MASK); 1367 ret = -ENOTSUP; 1368 g_free(feature_table); 1369 goto fail; 1370 } 1371 1372 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) { 1373 /* Corrupt images may not be written to unless they are being repaired 1374 */ 1375 if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) { 1376 error_setg(errp, "qcow2: Image is corrupt; cannot be opened " 1377 "read/write"); 1378 ret = -EACCES; 1379 goto fail; 1380 } 1381 } 1382 1383 /* Check support for various header values */ 1384 if (header.refcount_order > 6) { 1385 error_setg(errp, "Reference count entry width too large; may not " 1386 "exceed 64 bits"); 1387 ret = -EINVAL; 1388 goto fail; 1389 } 1390 s->refcount_order = header.refcount_order; 1391 s->refcount_bits = 1 << s->refcount_order; 1392 s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1); 1393 s->refcount_max += s->refcount_max - 1; 1394 1395 s->crypt_method_header = header.crypt_method; 1396 if (s->crypt_method_header) { 1397 if (bdrv_uses_whitelist() && 1398 s->crypt_method_header == QCOW_CRYPT_AES) { 1399 error_setg(errp, 1400 "Use of AES-CBC encrypted qcow2 images is no longer " 1401 "supported in system emulators"); 1402 error_append_hint(errp, 1403 "You can use 'qemu-img convert' to convert your " 1404 "image to an alternative supported format, such " 1405 "as unencrypted qcow2, or raw with the LUKS " 1406 "format instead.\n"); 1407 ret = -ENOSYS; 1408 goto fail; 1409 } 1410 1411 if (s->crypt_method_header == QCOW_CRYPT_AES) { 1412 s->crypt_physical_offset = false; 1413 } else { 1414 /* Assuming LUKS and any future crypt methods we 1415 * add will all use physical offsets, due to the 1416 * fact that the alternative is insecure... */ 1417 s->crypt_physical_offset = true; 1418 } 1419 1420 bs->encrypted = true; 1421 } 1422 1423 s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */ 1424 s->l2_size = 1 << s->l2_bits; 1425 /* 2^(s->refcount_order - 3) is the refcount width in bytes */ 1426 s->refcount_block_bits = s->cluster_bits - (s->refcount_order - 3); 1427 s->refcount_block_size = 1 << s->refcount_block_bits; 1428 bs->total_sectors = header.size / BDRV_SECTOR_SIZE; 1429 s->csize_shift = (62 - (s->cluster_bits - 8)); 1430 s->csize_mask = (1 << (s->cluster_bits - 8)) - 1; 1431 s->cluster_offset_mask = (1LL << s->csize_shift) - 1; 1432 1433 s->refcount_table_offset = header.refcount_table_offset; 1434 s->refcount_table_size = 1435 header.refcount_table_clusters << (s->cluster_bits - 3); 1436 1437 if (header.refcount_table_clusters == 0 && !(flags & BDRV_O_CHECK)) { 1438 error_setg(errp, "Image does not contain a reference count table"); 1439 ret = -EINVAL; 1440 goto fail; 1441 } 1442 1443 ret = qcow2_validate_table(bs, s->refcount_table_offset, 1444 header.refcount_table_clusters, 1445 s->cluster_size, QCOW_MAX_REFTABLE_SIZE, 1446 "Reference count table", errp); 1447 if (ret < 0) { 1448 goto fail; 1449 } 1450 1451 if (!(flags & BDRV_O_CHECK)) { 1452 /* 1453 * The total size in bytes of the snapshot table is checked in 1454 * qcow2_read_snapshots() because the size of each snapshot is 1455 * variable and we don't know it yet. 1456 * Here we only check the offset and number of snapshots. 1457 */ 1458 ret = qcow2_validate_table(bs, header.snapshots_offset, 1459 header.nb_snapshots, 1460 sizeof(QCowSnapshotHeader), 1461 sizeof(QCowSnapshotHeader) * 1462 QCOW_MAX_SNAPSHOTS, 1463 "Snapshot table", errp); 1464 if (ret < 0) { 1465 goto fail; 1466 } 1467 } 1468 1469 /* read the level 1 table */ 1470 ret = qcow2_validate_table(bs, header.l1_table_offset, 1471 header.l1_size, sizeof(uint64_t), 1472 QCOW_MAX_L1_SIZE, "Active L1 table", errp); 1473 if (ret < 0) { 1474 goto fail; 1475 } 1476 s->l1_size = header.l1_size; 1477 s->l1_table_offset = header.l1_table_offset; 1478 1479 l1_vm_state_index = size_to_l1(s, header.size); 1480 if (l1_vm_state_index > INT_MAX) { 1481 error_setg(errp, "Image is too big"); 1482 ret = -EFBIG; 1483 goto fail; 1484 } 1485 s->l1_vm_state_index = l1_vm_state_index; 1486 1487 /* the L1 table must contain at least enough entries to put 1488 header.size bytes */ 1489 if (s->l1_size < s->l1_vm_state_index) { 1490 error_setg(errp, "L1 table is too small"); 1491 ret = -EINVAL; 1492 goto fail; 1493 } 1494 1495 if (s->l1_size > 0) { 1496 s->l1_table = qemu_try_blockalign(bs->file->bs, 1497 s->l1_size * sizeof(uint64_t)); 1498 if (s->l1_table == NULL) { 1499 error_setg(errp, "Could not allocate L1 table"); 1500 ret = -ENOMEM; 1501 goto fail; 1502 } 1503 ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table, 1504 s->l1_size * sizeof(uint64_t)); 1505 if (ret < 0) { 1506 error_setg_errno(errp, -ret, "Could not read L1 table"); 1507 goto fail; 1508 } 1509 for(i = 0;i < s->l1_size; i++) { 1510 s->l1_table[i] = be64_to_cpu(s->l1_table[i]); 1511 } 1512 } 1513 1514 /* Parse driver-specific options */ 1515 ret = qcow2_update_options(bs, options, flags, errp); 1516 if (ret < 0) { 1517 goto fail; 1518 } 1519 1520 s->flags = flags; 1521 1522 ret = qcow2_refcount_init(bs); 1523 if (ret != 0) { 1524 error_setg_errno(errp, -ret, "Could not initialize refcount handling"); 1525 goto fail; 1526 } 1527 1528 QLIST_INIT(&s->cluster_allocs); 1529 QTAILQ_INIT(&s->discards); 1530 1531 /* read qcow2 extensions */ 1532 if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL, 1533 flags, &update_header, &local_err)) { 1534 error_propagate(errp, local_err); 1535 ret = -EINVAL; 1536 goto fail; 1537 } 1538 1539 /* Open external data file */ 1540 s->data_file = bdrv_open_child(NULL, options, "data-file", bs, &child_file, 1541 true, &local_err); 1542 if (local_err) { 1543 error_propagate(errp, local_err); 1544 ret = -EINVAL; 1545 goto fail; 1546 } 1547 1548 if (s->incompatible_features & QCOW2_INCOMPAT_DATA_FILE) { 1549 if (!s->data_file && s->image_data_file) { 1550 s->data_file = bdrv_open_child(s->image_data_file, options, 1551 "data-file", bs, &child_file, 1552 false, errp); 1553 if (!s->data_file) { 1554 ret = -EINVAL; 1555 goto fail; 1556 } 1557 } 1558 if (!s->data_file) { 1559 error_setg(errp, "'data-file' is required for this image"); 1560 ret = -EINVAL; 1561 goto fail; 1562 } 1563 } else { 1564 if (s->data_file) { 1565 error_setg(errp, "'data-file' can only be set for images with an " 1566 "external data file"); 1567 ret = -EINVAL; 1568 goto fail; 1569 } 1570 1571 s->data_file = bs->file; 1572 1573 if (data_file_is_raw(bs)) { 1574 error_setg(errp, "data-file-raw requires a data file"); 1575 ret = -EINVAL; 1576 goto fail; 1577 } 1578 } 1579 1580 /* qcow2_read_extension may have set up the crypto context 1581 * if the crypt method needs a header region, some methods 1582 * don't need header extensions, so must check here 1583 */ 1584 if (s->crypt_method_header && !s->crypto) { 1585 if (s->crypt_method_header == QCOW_CRYPT_AES) { 1586 unsigned int cflags = 0; 1587 if (flags & BDRV_O_NO_IO) { 1588 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO; 1589 } 1590 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.", 1591 NULL, NULL, cflags, 1592 QCOW2_MAX_THREADS, errp); 1593 if (!s->crypto) { 1594 ret = -EINVAL; 1595 goto fail; 1596 } 1597 } else if (!(flags & BDRV_O_NO_IO)) { 1598 error_setg(errp, "Missing CRYPTO header for crypt method %d", 1599 s->crypt_method_header); 1600 ret = -EINVAL; 1601 goto fail; 1602 } 1603 } 1604 1605 /* read the backing file name */ 1606 if (header.backing_file_offset != 0) { 1607 len = header.backing_file_size; 1608 if (len > MIN(1023, s->cluster_size - header.backing_file_offset) || 1609 len >= sizeof(bs->backing_file)) { 1610 error_setg(errp, "Backing file name too long"); 1611 ret = -EINVAL; 1612 goto fail; 1613 } 1614 ret = bdrv_pread(bs->file, header.backing_file_offset, 1615 bs->auto_backing_file, len); 1616 if (ret < 0) { 1617 error_setg_errno(errp, -ret, "Could not read backing file name"); 1618 goto fail; 1619 } 1620 bs->auto_backing_file[len] = '\0'; 1621 pstrcpy(bs->backing_file, sizeof(bs->backing_file), 1622 bs->auto_backing_file); 1623 s->image_backing_file = g_strdup(bs->auto_backing_file); 1624 } 1625 1626 /* 1627 * Internal snapshots; skip reading them in check mode, because 1628 * we do not need them then, and we do not want to abort because 1629 * of a broken table. 1630 */ 1631 if (!(flags & BDRV_O_CHECK)) { 1632 s->snapshots_offset = header.snapshots_offset; 1633 s->nb_snapshots = header.nb_snapshots; 1634 1635 ret = qcow2_read_snapshots(bs, errp); 1636 if (ret < 0) { 1637 goto fail; 1638 } 1639 } 1640 1641 /* Clear unknown autoclear feature bits */ 1642 update_header |= s->autoclear_features & ~QCOW2_AUTOCLEAR_MASK; 1643 update_header = 1644 update_header && !bs->read_only && !(flags & BDRV_O_INACTIVE); 1645 if (update_header) { 1646 s->autoclear_features &= QCOW2_AUTOCLEAR_MASK; 1647 } 1648 1649 /* == Handle persistent dirty bitmaps == 1650 * 1651 * We want load dirty bitmaps in three cases: 1652 * 1653 * 1. Normal open of the disk in active mode, not related to invalidation 1654 * after migration. 1655 * 1656 * 2. Invalidation of the target vm after pre-copy phase of migration, if 1657 * bitmaps are _not_ migrating through migration channel, i.e. 1658 * 'dirty-bitmaps' capability is disabled. 1659 * 1660 * 3. Invalidation of source vm after failed or canceled migration. 1661 * This is a very interesting case. There are two possible types of 1662 * bitmaps: 1663 * 1664 * A. Stored on inactivation and removed. They should be loaded from the 1665 * image. 1666 * 1667 * B. Not stored: not-persistent bitmaps and bitmaps, migrated through 1668 * the migration channel (with dirty-bitmaps capability). 1669 * 1670 * On the other hand, there are two possible sub-cases: 1671 * 1672 * 3.1 disk was changed by somebody else while were inactive. In this 1673 * case all in-RAM dirty bitmaps (both persistent and not) are 1674 * definitely invalid. And we don't have any method to determine 1675 * this. 1676 * 1677 * Simple and safe thing is to just drop all the bitmaps of type B on 1678 * inactivation. But in this case we lose bitmaps in valid 4.2 case. 1679 * 1680 * On the other hand, resuming source vm, if disk was already changed 1681 * is a bad thing anyway: not only bitmaps, the whole vm state is 1682 * out of sync with disk. 1683 * 1684 * This means, that user or management tool, who for some reason 1685 * decided to resume source vm, after disk was already changed by 1686 * target vm, should at least drop all dirty bitmaps by hand. 1687 * 1688 * So, we can ignore this case for now, but TODO: "generation" 1689 * extension for qcow2, to determine, that image was changed after 1690 * last inactivation. And if it is changed, we will drop (or at least 1691 * mark as 'invalid' all the bitmaps of type B, both persistent 1692 * and not). 1693 * 1694 * 3.2 disk was _not_ changed while were inactive. Bitmaps may be saved 1695 * to disk ('dirty-bitmaps' capability disabled), or not saved 1696 * ('dirty-bitmaps' capability enabled), but we don't need to care 1697 * of: let's load bitmaps as always: stored bitmaps will be loaded, 1698 * and not stored has flag IN_USE=1 in the image and will be skipped 1699 * on loading. 1700 * 1701 * One remaining possible case when we don't want load bitmaps: 1702 * 1703 * 4. Open disk in inactive mode in target vm (bitmaps are migrating or 1704 * will be loaded on invalidation, no needs try loading them before) 1705 */ 1706 1707 if (!(bdrv_get_flags(bs) & BDRV_O_INACTIVE)) { 1708 /* It's case 1, 2 or 3.2. Or 3.1 which is BUG in management layer. */ 1709 bool header_updated = qcow2_load_dirty_bitmaps(bs, &local_err); 1710 if (local_err != NULL) { 1711 error_propagate(errp, local_err); 1712 ret = -EINVAL; 1713 goto fail; 1714 } 1715 1716 update_header = update_header && !header_updated; 1717 } 1718 1719 if (update_header) { 1720 ret = qcow2_update_header(bs); 1721 if (ret < 0) { 1722 error_setg_errno(errp, -ret, "Could not update qcow2 header"); 1723 goto fail; 1724 } 1725 } 1726 1727 bs->supported_zero_flags = header.version >= 3 ? 1728 BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK : 0; 1729 bs->supported_truncate_flags = BDRV_REQ_ZERO_WRITE; 1730 1731 /* Repair image if dirty */ 1732 if (!(flags & (BDRV_O_CHECK | BDRV_O_INACTIVE)) && !bs->read_only && 1733 (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) { 1734 BdrvCheckResult result = {0}; 1735 1736 ret = qcow2_co_check_locked(bs, &result, 1737 BDRV_FIX_ERRORS | BDRV_FIX_LEAKS); 1738 if (ret < 0 || result.check_errors) { 1739 if (ret >= 0) { 1740 ret = -EIO; 1741 } 1742 error_setg_errno(errp, -ret, "Could not repair dirty image"); 1743 goto fail; 1744 } 1745 } 1746 1747 #ifdef DEBUG_ALLOC 1748 { 1749 BdrvCheckResult result = {0}; 1750 qcow2_check_refcounts(bs, &result, 0); 1751 } 1752 #endif 1753 1754 qemu_co_queue_init(&s->thread_task_queue); 1755 1756 return ret; 1757 1758 fail: 1759 g_free(s->image_data_file); 1760 if (has_data_file(bs)) { 1761 bdrv_unref_child(bs, s->data_file); 1762 s->data_file = NULL; 1763 } 1764 g_free(s->unknown_header_fields); 1765 cleanup_unknown_header_ext(bs); 1766 qcow2_free_snapshots(bs); 1767 qcow2_refcount_close(bs); 1768 qemu_vfree(s->l1_table); 1769 /* else pre-write overlap checks in cache_destroy may crash */ 1770 s->l1_table = NULL; 1771 cache_clean_timer_del(bs); 1772 if (s->l2_table_cache) { 1773 qcow2_cache_destroy(s->l2_table_cache); 1774 } 1775 if (s->refcount_block_cache) { 1776 qcow2_cache_destroy(s->refcount_block_cache); 1777 } 1778 qcrypto_block_free(s->crypto); 1779 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 1780 return ret; 1781 } 1782 1783 typedef struct QCow2OpenCo { 1784 BlockDriverState *bs; 1785 QDict *options; 1786 int flags; 1787 Error **errp; 1788 int ret; 1789 } QCow2OpenCo; 1790 1791 static void coroutine_fn qcow2_open_entry(void *opaque) 1792 { 1793 QCow2OpenCo *qoc = opaque; 1794 BDRVQcow2State *s = qoc->bs->opaque; 1795 1796 qemu_co_mutex_lock(&s->lock); 1797 qoc->ret = qcow2_do_open(qoc->bs, qoc->options, qoc->flags, qoc->errp); 1798 qemu_co_mutex_unlock(&s->lock); 1799 } 1800 1801 static int qcow2_open(BlockDriverState *bs, QDict *options, int flags, 1802 Error **errp) 1803 { 1804 BDRVQcow2State *s = bs->opaque; 1805 QCow2OpenCo qoc = { 1806 .bs = bs, 1807 .options = options, 1808 .flags = flags, 1809 .errp = errp, 1810 .ret = -EINPROGRESS 1811 }; 1812 1813 bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file, 1814 false, errp); 1815 if (!bs->file) { 1816 return -EINVAL; 1817 } 1818 1819 /* Initialise locks */ 1820 qemu_co_mutex_init(&s->lock); 1821 1822 if (qemu_in_coroutine()) { 1823 /* From bdrv_co_create. */ 1824 qcow2_open_entry(&qoc); 1825 } else { 1826 assert(qemu_get_current_aio_context() == qemu_get_aio_context()); 1827 qemu_coroutine_enter(qemu_coroutine_create(qcow2_open_entry, &qoc)); 1828 BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS); 1829 } 1830 return qoc.ret; 1831 } 1832 1833 static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp) 1834 { 1835 BDRVQcow2State *s = bs->opaque; 1836 1837 if (bs->encrypted) { 1838 /* Encryption works on a sector granularity */ 1839 bs->bl.request_alignment = qcrypto_block_get_sector_size(s->crypto); 1840 } 1841 bs->bl.pwrite_zeroes_alignment = s->cluster_size; 1842 bs->bl.pdiscard_alignment = s->cluster_size; 1843 } 1844 1845 static int qcow2_reopen_prepare(BDRVReopenState *state, 1846 BlockReopenQueue *queue, Error **errp) 1847 { 1848 Qcow2ReopenState *r; 1849 int ret; 1850 1851 r = g_new0(Qcow2ReopenState, 1); 1852 state->opaque = r; 1853 1854 ret = qcow2_update_options_prepare(state->bs, r, state->options, 1855 state->flags, errp); 1856 if (ret < 0) { 1857 goto fail; 1858 } 1859 1860 /* We need to write out any unwritten data if we reopen read-only. */ 1861 if ((state->flags & BDRV_O_RDWR) == 0) { 1862 ret = qcow2_reopen_bitmaps_ro(state->bs, errp); 1863 if (ret < 0) { 1864 goto fail; 1865 } 1866 1867 ret = bdrv_flush(state->bs); 1868 if (ret < 0) { 1869 goto fail; 1870 } 1871 1872 ret = qcow2_mark_clean(state->bs); 1873 if (ret < 0) { 1874 goto fail; 1875 } 1876 } 1877 1878 return 0; 1879 1880 fail: 1881 qcow2_update_options_abort(state->bs, r); 1882 g_free(r); 1883 return ret; 1884 } 1885 1886 static void qcow2_reopen_commit(BDRVReopenState *state) 1887 { 1888 qcow2_update_options_commit(state->bs, state->opaque); 1889 g_free(state->opaque); 1890 } 1891 1892 static void qcow2_reopen_commit_post(BDRVReopenState *state) 1893 { 1894 if (state->flags & BDRV_O_RDWR) { 1895 Error *local_err = NULL; 1896 1897 if (qcow2_reopen_bitmaps_rw(state->bs, &local_err) < 0) { 1898 /* 1899 * This is not fatal, bitmaps just left read-only, so all following 1900 * writes will fail. User can remove read-only bitmaps to unblock 1901 * writes or retry reopen. 1902 */ 1903 error_reportf_err(local_err, 1904 "%s: Failed to make dirty bitmaps writable: ", 1905 bdrv_get_node_name(state->bs)); 1906 } 1907 } 1908 } 1909 1910 static void qcow2_reopen_abort(BDRVReopenState *state) 1911 { 1912 qcow2_update_options_abort(state->bs, state->opaque); 1913 g_free(state->opaque); 1914 } 1915 1916 static void qcow2_join_options(QDict *options, QDict *old_options) 1917 { 1918 bool has_new_overlap_template = 1919 qdict_haskey(options, QCOW2_OPT_OVERLAP) || 1920 qdict_haskey(options, QCOW2_OPT_OVERLAP_TEMPLATE); 1921 bool has_new_total_cache_size = 1922 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE); 1923 bool has_all_cache_options; 1924 1925 /* New overlap template overrides all old overlap options */ 1926 if (has_new_overlap_template) { 1927 qdict_del(old_options, QCOW2_OPT_OVERLAP); 1928 qdict_del(old_options, QCOW2_OPT_OVERLAP_TEMPLATE); 1929 qdict_del(old_options, QCOW2_OPT_OVERLAP_MAIN_HEADER); 1930 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L1); 1931 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L2); 1932 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_TABLE); 1933 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK); 1934 qdict_del(old_options, QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE); 1935 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L1); 1936 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L2); 1937 } 1938 1939 /* New total cache size overrides all old options */ 1940 if (qdict_haskey(options, QCOW2_OPT_CACHE_SIZE)) { 1941 qdict_del(old_options, QCOW2_OPT_L2_CACHE_SIZE); 1942 qdict_del(old_options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 1943 } 1944 1945 qdict_join(options, old_options, false); 1946 1947 /* 1948 * If after merging all cache size options are set, an old total size is 1949 * overwritten. Do keep all options, however, if all three are new. The 1950 * resulting error message is what we want to happen. 1951 */ 1952 has_all_cache_options = 1953 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE) || 1954 qdict_haskey(options, QCOW2_OPT_L2_CACHE_SIZE) || 1955 qdict_haskey(options, QCOW2_OPT_REFCOUNT_CACHE_SIZE); 1956 1957 if (has_all_cache_options && !has_new_total_cache_size) { 1958 qdict_del(options, QCOW2_OPT_CACHE_SIZE); 1959 } 1960 } 1961 1962 static int coroutine_fn qcow2_co_block_status(BlockDriverState *bs, 1963 bool want_zero, 1964 int64_t offset, int64_t count, 1965 int64_t *pnum, int64_t *map, 1966 BlockDriverState **file) 1967 { 1968 BDRVQcow2State *s = bs->opaque; 1969 uint64_t cluster_offset; 1970 unsigned int bytes; 1971 int ret, status = 0; 1972 1973 qemu_co_mutex_lock(&s->lock); 1974 1975 if (!s->metadata_preallocation_checked) { 1976 ret = qcow2_detect_metadata_preallocation(bs); 1977 s->metadata_preallocation = (ret == 1); 1978 s->metadata_preallocation_checked = true; 1979 } 1980 1981 bytes = MIN(INT_MAX, count); 1982 ret = qcow2_get_cluster_offset(bs, offset, &bytes, &cluster_offset); 1983 qemu_co_mutex_unlock(&s->lock); 1984 if (ret < 0) { 1985 return ret; 1986 } 1987 1988 *pnum = bytes; 1989 1990 if ((ret == QCOW2_CLUSTER_NORMAL || ret == QCOW2_CLUSTER_ZERO_ALLOC) && 1991 !s->crypto) { 1992 *map = cluster_offset | offset_into_cluster(s, offset); 1993 *file = s->data_file->bs; 1994 status |= BDRV_BLOCK_OFFSET_VALID; 1995 } 1996 if (ret == QCOW2_CLUSTER_ZERO_PLAIN || ret == QCOW2_CLUSTER_ZERO_ALLOC) { 1997 status |= BDRV_BLOCK_ZERO; 1998 } else if (ret != QCOW2_CLUSTER_UNALLOCATED) { 1999 status |= BDRV_BLOCK_DATA; 2000 } 2001 if (s->metadata_preallocation && (status & BDRV_BLOCK_DATA) && 2002 (status & BDRV_BLOCK_OFFSET_VALID)) 2003 { 2004 status |= BDRV_BLOCK_RECURSE; 2005 } 2006 return status; 2007 } 2008 2009 static coroutine_fn int qcow2_handle_l2meta(BlockDriverState *bs, 2010 QCowL2Meta **pl2meta, 2011 bool link_l2) 2012 { 2013 int ret = 0; 2014 QCowL2Meta *l2meta = *pl2meta; 2015 2016 while (l2meta != NULL) { 2017 QCowL2Meta *next; 2018 2019 if (link_l2) { 2020 ret = qcow2_alloc_cluster_link_l2(bs, l2meta); 2021 if (ret) { 2022 goto out; 2023 } 2024 } else { 2025 qcow2_alloc_cluster_abort(bs, l2meta); 2026 } 2027 2028 /* Take the request off the list of running requests */ 2029 if (l2meta->nb_clusters != 0) { 2030 QLIST_REMOVE(l2meta, next_in_flight); 2031 } 2032 2033 qemu_co_queue_restart_all(&l2meta->dependent_requests); 2034 2035 next = l2meta->next; 2036 g_free(l2meta); 2037 l2meta = next; 2038 } 2039 out: 2040 *pl2meta = l2meta; 2041 return ret; 2042 } 2043 2044 static coroutine_fn int 2045 qcow2_co_preadv_encrypted(BlockDriverState *bs, 2046 uint64_t file_cluster_offset, 2047 uint64_t offset, 2048 uint64_t bytes, 2049 QEMUIOVector *qiov, 2050 uint64_t qiov_offset) 2051 { 2052 int ret; 2053 BDRVQcow2State *s = bs->opaque; 2054 uint8_t *buf; 2055 2056 assert(bs->encrypted && s->crypto); 2057 assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 2058 2059 /* 2060 * For encrypted images, read everything into a temporary 2061 * contiguous buffer on which the AES functions can work. 2062 * Also, decryption in a separate buffer is better as it 2063 * prevents the guest from learning information about the 2064 * encrypted nature of the virtual disk. 2065 */ 2066 2067 buf = qemu_try_blockalign(s->data_file->bs, bytes); 2068 if (buf == NULL) { 2069 return -ENOMEM; 2070 } 2071 2072 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); 2073 ret = bdrv_co_pread(s->data_file, 2074 file_cluster_offset + offset_into_cluster(s, offset), 2075 bytes, buf, 0); 2076 if (ret < 0) { 2077 goto fail; 2078 } 2079 2080 if (qcow2_co_decrypt(bs, 2081 file_cluster_offset + offset_into_cluster(s, offset), 2082 offset, buf, bytes) < 0) 2083 { 2084 ret = -EIO; 2085 goto fail; 2086 } 2087 qemu_iovec_from_buf(qiov, qiov_offset, buf, bytes); 2088 2089 fail: 2090 qemu_vfree(buf); 2091 2092 return ret; 2093 } 2094 2095 typedef struct Qcow2AioTask { 2096 AioTask task; 2097 2098 BlockDriverState *bs; 2099 QCow2ClusterType cluster_type; /* only for read */ 2100 uint64_t file_cluster_offset; 2101 uint64_t offset; 2102 uint64_t bytes; 2103 QEMUIOVector *qiov; 2104 uint64_t qiov_offset; 2105 QCowL2Meta *l2meta; /* only for write */ 2106 } Qcow2AioTask; 2107 2108 static coroutine_fn int qcow2_co_preadv_task_entry(AioTask *task); 2109 static coroutine_fn int qcow2_add_task(BlockDriverState *bs, 2110 AioTaskPool *pool, 2111 AioTaskFunc func, 2112 QCow2ClusterType cluster_type, 2113 uint64_t file_cluster_offset, 2114 uint64_t offset, 2115 uint64_t bytes, 2116 QEMUIOVector *qiov, 2117 size_t qiov_offset, 2118 QCowL2Meta *l2meta) 2119 { 2120 Qcow2AioTask local_task; 2121 Qcow2AioTask *task = pool ? g_new(Qcow2AioTask, 1) : &local_task; 2122 2123 *task = (Qcow2AioTask) { 2124 .task.func = func, 2125 .bs = bs, 2126 .cluster_type = cluster_type, 2127 .qiov = qiov, 2128 .file_cluster_offset = file_cluster_offset, 2129 .offset = offset, 2130 .bytes = bytes, 2131 .qiov_offset = qiov_offset, 2132 .l2meta = l2meta, 2133 }; 2134 2135 trace_qcow2_add_task(qemu_coroutine_self(), bs, pool, 2136 func == qcow2_co_preadv_task_entry ? "read" : "write", 2137 cluster_type, file_cluster_offset, offset, bytes, 2138 qiov, qiov_offset); 2139 2140 if (!pool) { 2141 return func(&task->task); 2142 } 2143 2144 aio_task_pool_start_task(pool, &task->task); 2145 2146 return 0; 2147 } 2148 2149 static coroutine_fn int qcow2_co_preadv_task(BlockDriverState *bs, 2150 QCow2ClusterType cluster_type, 2151 uint64_t file_cluster_offset, 2152 uint64_t offset, uint64_t bytes, 2153 QEMUIOVector *qiov, 2154 size_t qiov_offset) 2155 { 2156 BDRVQcow2State *s = bs->opaque; 2157 int offset_in_cluster = offset_into_cluster(s, offset); 2158 2159 switch (cluster_type) { 2160 case QCOW2_CLUSTER_ZERO_PLAIN: 2161 case QCOW2_CLUSTER_ZERO_ALLOC: 2162 /* Both zero types are handled in qcow2_co_preadv_part */ 2163 g_assert_not_reached(); 2164 2165 case QCOW2_CLUSTER_UNALLOCATED: 2166 assert(bs->backing); /* otherwise handled in qcow2_co_preadv_part */ 2167 2168 BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); 2169 return bdrv_co_preadv_part(bs->backing, offset, bytes, 2170 qiov, qiov_offset, 0); 2171 2172 case QCOW2_CLUSTER_COMPRESSED: 2173 return qcow2_co_preadv_compressed(bs, file_cluster_offset, 2174 offset, bytes, qiov, qiov_offset); 2175 2176 case QCOW2_CLUSTER_NORMAL: 2177 assert(offset_into_cluster(s, file_cluster_offset) == 0); 2178 if (bs->encrypted) { 2179 return qcow2_co_preadv_encrypted(bs, file_cluster_offset, 2180 offset, bytes, qiov, qiov_offset); 2181 } 2182 2183 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); 2184 return bdrv_co_preadv_part(s->data_file, 2185 file_cluster_offset + offset_in_cluster, 2186 bytes, qiov, qiov_offset, 0); 2187 2188 default: 2189 g_assert_not_reached(); 2190 } 2191 2192 g_assert_not_reached(); 2193 } 2194 2195 static coroutine_fn int qcow2_co_preadv_task_entry(AioTask *task) 2196 { 2197 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task); 2198 2199 assert(!t->l2meta); 2200 2201 return qcow2_co_preadv_task(t->bs, t->cluster_type, t->file_cluster_offset, 2202 t->offset, t->bytes, t->qiov, t->qiov_offset); 2203 } 2204 2205 static coroutine_fn int qcow2_co_preadv_part(BlockDriverState *bs, 2206 uint64_t offset, uint64_t bytes, 2207 QEMUIOVector *qiov, 2208 size_t qiov_offset, int flags) 2209 { 2210 BDRVQcow2State *s = bs->opaque; 2211 int ret = 0; 2212 unsigned int cur_bytes; /* number of bytes in current iteration */ 2213 uint64_t cluster_offset = 0; 2214 AioTaskPool *aio = NULL; 2215 2216 while (bytes != 0 && aio_task_pool_status(aio) == 0) { 2217 /* prepare next request */ 2218 cur_bytes = MIN(bytes, INT_MAX); 2219 if (s->crypto) { 2220 cur_bytes = MIN(cur_bytes, 2221 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 2222 } 2223 2224 qemu_co_mutex_lock(&s->lock); 2225 ret = qcow2_get_cluster_offset(bs, offset, &cur_bytes, &cluster_offset); 2226 qemu_co_mutex_unlock(&s->lock); 2227 if (ret < 0) { 2228 goto out; 2229 } 2230 2231 if (ret == QCOW2_CLUSTER_ZERO_PLAIN || 2232 ret == QCOW2_CLUSTER_ZERO_ALLOC || 2233 (ret == QCOW2_CLUSTER_UNALLOCATED && !bs->backing)) 2234 { 2235 qemu_iovec_memset(qiov, qiov_offset, 0, cur_bytes); 2236 } else { 2237 if (!aio && cur_bytes != bytes) { 2238 aio = aio_task_pool_new(QCOW2_MAX_WORKERS); 2239 } 2240 ret = qcow2_add_task(bs, aio, qcow2_co_preadv_task_entry, ret, 2241 cluster_offset, offset, cur_bytes, 2242 qiov, qiov_offset, NULL); 2243 if (ret < 0) { 2244 goto out; 2245 } 2246 } 2247 2248 bytes -= cur_bytes; 2249 offset += cur_bytes; 2250 qiov_offset += cur_bytes; 2251 } 2252 2253 out: 2254 if (aio) { 2255 aio_task_pool_wait_all(aio); 2256 if (ret == 0) { 2257 ret = aio_task_pool_status(aio); 2258 } 2259 g_free(aio); 2260 } 2261 2262 return ret; 2263 } 2264 2265 /* Check if it's possible to merge a write request with the writing of 2266 * the data from the COW regions */ 2267 static bool merge_cow(uint64_t offset, unsigned bytes, 2268 QEMUIOVector *qiov, size_t qiov_offset, 2269 QCowL2Meta *l2meta) 2270 { 2271 QCowL2Meta *m; 2272 2273 for (m = l2meta; m != NULL; m = m->next) { 2274 /* If both COW regions are empty then there's nothing to merge */ 2275 if (m->cow_start.nb_bytes == 0 && m->cow_end.nb_bytes == 0) { 2276 continue; 2277 } 2278 2279 /* If COW regions are handled already, skip this too */ 2280 if (m->skip_cow) { 2281 continue; 2282 } 2283 2284 /* The data (middle) region must be immediately after the 2285 * start region */ 2286 if (l2meta_cow_start(m) + m->cow_start.nb_bytes != offset) { 2287 continue; 2288 } 2289 2290 /* The end region must be immediately after the data (middle) 2291 * region */ 2292 if (m->offset + m->cow_end.offset != offset + bytes) { 2293 continue; 2294 } 2295 2296 /* Make sure that adding both COW regions to the QEMUIOVector 2297 * does not exceed IOV_MAX */ 2298 if (qemu_iovec_subvec_niov(qiov, qiov_offset, bytes) > IOV_MAX - 2) { 2299 continue; 2300 } 2301 2302 m->data_qiov = qiov; 2303 m->data_qiov_offset = qiov_offset; 2304 return true; 2305 } 2306 2307 return false; 2308 } 2309 2310 static bool is_unallocated(BlockDriverState *bs, int64_t offset, int64_t bytes) 2311 { 2312 int64_t nr; 2313 return !bytes || 2314 (!bdrv_is_allocated_above(bs, NULL, false, offset, bytes, &nr) && 2315 nr == bytes); 2316 } 2317 2318 static bool is_zero_cow(BlockDriverState *bs, QCowL2Meta *m) 2319 { 2320 /* 2321 * This check is designed for optimization shortcut so it must be 2322 * efficient. 2323 * Instead of is_zero(), use is_unallocated() as it is faster (but not 2324 * as accurate and can result in false negatives). 2325 */ 2326 return is_unallocated(bs, m->offset + m->cow_start.offset, 2327 m->cow_start.nb_bytes) && 2328 is_unallocated(bs, m->offset + m->cow_end.offset, 2329 m->cow_end.nb_bytes); 2330 } 2331 2332 static int handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta) 2333 { 2334 BDRVQcow2State *s = bs->opaque; 2335 QCowL2Meta *m; 2336 2337 if (!(s->data_file->bs->supported_zero_flags & BDRV_REQ_NO_FALLBACK)) { 2338 return 0; 2339 } 2340 2341 if (bs->encrypted) { 2342 return 0; 2343 } 2344 2345 for (m = l2meta; m != NULL; m = m->next) { 2346 int ret; 2347 2348 if (!m->cow_start.nb_bytes && !m->cow_end.nb_bytes) { 2349 continue; 2350 } 2351 2352 if (!is_zero_cow(bs, m)) { 2353 continue; 2354 } 2355 2356 /* 2357 * instead of writing zero COW buffers, 2358 * efficiently zero out the whole clusters 2359 */ 2360 2361 ret = qcow2_pre_write_overlap_check(bs, 0, m->alloc_offset, 2362 m->nb_clusters * s->cluster_size, 2363 true); 2364 if (ret < 0) { 2365 return ret; 2366 } 2367 2368 BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_SPACE); 2369 ret = bdrv_co_pwrite_zeroes(s->data_file, m->alloc_offset, 2370 m->nb_clusters * s->cluster_size, 2371 BDRV_REQ_NO_FALLBACK); 2372 if (ret < 0) { 2373 if (ret != -ENOTSUP && ret != -EAGAIN) { 2374 return ret; 2375 } 2376 continue; 2377 } 2378 2379 trace_qcow2_skip_cow(qemu_coroutine_self(), m->offset, m->nb_clusters); 2380 m->skip_cow = true; 2381 } 2382 return 0; 2383 } 2384 2385 /* 2386 * qcow2_co_pwritev_task 2387 * Called with s->lock unlocked 2388 * l2meta - if not NULL, qcow2_co_pwritev_task() will consume it. Caller must 2389 * not use it somehow after qcow2_co_pwritev_task() call 2390 */ 2391 static coroutine_fn int qcow2_co_pwritev_task(BlockDriverState *bs, 2392 uint64_t file_cluster_offset, 2393 uint64_t offset, uint64_t bytes, 2394 QEMUIOVector *qiov, 2395 uint64_t qiov_offset, 2396 QCowL2Meta *l2meta) 2397 { 2398 int ret; 2399 BDRVQcow2State *s = bs->opaque; 2400 void *crypt_buf = NULL; 2401 int offset_in_cluster = offset_into_cluster(s, offset); 2402 QEMUIOVector encrypted_qiov; 2403 2404 if (bs->encrypted) { 2405 assert(s->crypto); 2406 assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); 2407 crypt_buf = qemu_try_blockalign(bs->file->bs, bytes); 2408 if (crypt_buf == NULL) { 2409 ret = -ENOMEM; 2410 goto out_unlocked; 2411 } 2412 qemu_iovec_to_buf(qiov, qiov_offset, crypt_buf, bytes); 2413 2414 if (qcow2_co_encrypt(bs, file_cluster_offset + offset_in_cluster, 2415 offset, crypt_buf, bytes) < 0) 2416 { 2417 ret = -EIO; 2418 goto out_unlocked; 2419 } 2420 2421 qemu_iovec_init_buf(&encrypted_qiov, crypt_buf, bytes); 2422 qiov = &encrypted_qiov; 2423 qiov_offset = 0; 2424 } 2425 2426 /* Try to efficiently initialize the physical space with zeroes */ 2427 ret = handle_alloc_space(bs, l2meta); 2428 if (ret < 0) { 2429 goto out_unlocked; 2430 } 2431 2432 /* 2433 * If we need to do COW, check if it's possible to merge the 2434 * writing of the guest data together with that of the COW regions. 2435 * If it's not possible (or not necessary) then write the 2436 * guest data now. 2437 */ 2438 if (!merge_cow(offset, bytes, qiov, qiov_offset, l2meta)) { 2439 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); 2440 trace_qcow2_writev_data(qemu_coroutine_self(), 2441 file_cluster_offset + offset_in_cluster); 2442 ret = bdrv_co_pwritev_part(s->data_file, 2443 file_cluster_offset + offset_in_cluster, 2444 bytes, qiov, qiov_offset, 0); 2445 if (ret < 0) { 2446 goto out_unlocked; 2447 } 2448 } 2449 2450 qemu_co_mutex_lock(&s->lock); 2451 2452 ret = qcow2_handle_l2meta(bs, &l2meta, true); 2453 goto out_locked; 2454 2455 out_unlocked: 2456 qemu_co_mutex_lock(&s->lock); 2457 2458 out_locked: 2459 qcow2_handle_l2meta(bs, &l2meta, false); 2460 qemu_co_mutex_unlock(&s->lock); 2461 2462 qemu_vfree(crypt_buf); 2463 2464 return ret; 2465 } 2466 2467 static coroutine_fn int qcow2_co_pwritev_task_entry(AioTask *task) 2468 { 2469 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task); 2470 2471 assert(!t->cluster_type); 2472 2473 return qcow2_co_pwritev_task(t->bs, t->file_cluster_offset, 2474 t->offset, t->bytes, t->qiov, t->qiov_offset, 2475 t->l2meta); 2476 } 2477 2478 static coroutine_fn int qcow2_co_pwritev_part( 2479 BlockDriverState *bs, uint64_t offset, uint64_t bytes, 2480 QEMUIOVector *qiov, size_t qiov_offset, int flags) 2481 { 2482 BDRVQcow2State *s = bs->opaque; 2483 int offset_in_cluster; 2484 int ret; 2485 unsigned int cur_bytes; /* number of sectors in current iteration */ 2486 uint64_t cluster_offset; 2487 QCowL2Meta *l2meta = NULL; 2488 AioTaskPool *aio = NULL; 2489 2490 trace_qcow2_writev_start_req(qemu_coroutine_self(), offset, bytes); 2491 2492 while (bytes != 0 && aio_task_pool_status(aio) == 0) { 2493 2494 l2meta = NULL; 2495 2496 trace_qcow2_writev_start_part(qemu_coroutine_self()); 2497 offset_in_cluster = offset_into_cluster(s, offset); 2498 cur_bytes = MIN(bytes, INT_MAX); 2499 if (bs->encrypted) { 2500 cur_bytes = MIN(cur_bytes, 2501 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size 2502 - offset_in_cluster); 2503 } 2504 2505 qemu_co_mutex_lock(&s->lock); 2506 2507 ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes, 2508 &cluster_offset, &l2meta); 2509 if (ret < 0) { 2510 goto out_locked; 2511 } 2512 2513 assert(offset_into_cluster(s, cluster_offset) == 0); 2514 2515 ret = qcow2_pre_write_overlap_check(bs, 0, 2516 cluster_offset + offset_in_cluster, 2517 cur_bytes, true); 2518 if (ret < 0) { 2519 goto out_locked; 2520 } 2521 2522 qemu_co_mutex_unlock(&s->lock); 2523 2524 if (!aio && cur_bytes != bytes) { 2525 aio = aio_task_pool_new(QCOW2_MAX_WORKERS); 2526 } 2527 ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_task_entry, 0, 2528 cluster_offset, offset, cur_bytes, 2529 qiov, qiov_offset, l2meta); 2530 l2meta = NULL; /* l2meta is consumed by qcow2_co_pwritev_task() */ 2531 if (ret < 0) { 2532 goto fail_nometa; 2533 } 2534 2535 bytes -= cur_bytes; 2536 offset += cur_bytes; 2537 qiov_offset += cur_bytes; 2538 trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes); 2539 } 2540 ret = 0; 2541 2542 qemu_co_mutex_lock(&s->lock); 2543 2544 out_locked: 2545 qcow2_handle_l2meta(bs, &l2meta, false); 2546 2547 qemu_co_mutex_unlock(&s->lock); 2548 2549 fail_nometa: 2550 if (aio) { 2551 aio_task_pool_wait_all(aio); 2552 if (ret == 0) { 2553 ret = aio_task_pool_status(aio); 2554 } 2555 g_free(aio); 2556 } 2557 2558 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); 2559 2560 return ret; 2561 } 2562 2563 static int qcow2_inactivate(BlockDriverState *bs) 2564 { 2565 BDRVQcow2State *s = bs->opaque; 2566 int ret, result = 0; 2567 Error *local_err = NULL; 2568 2569 qcow2_store_persistent_dirty_bitmaps(bs, true, &local_err); 2570 if (local_err != NULL) { 2571 result = -EINVAL; 2572 error_reportf_err(local_err, "Lost persistent bitmaps during " 2573 "inactivation of node '%s': ", 2574 bdrv_get_device_or_node_name(bs)); 2575 } 2576 2577 ret = qcow2_cache_flush(bs, s->l2_table_cache); 2578 if (ret) { 2579 result = ret; 2580 error_report("Failed to flush the L2 table cache: %s", 2581 strerror(-ret)); 2582 } 2583 2584 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 2585 if (ret) { 2586 result = ret; 2587 error_report("Failed to flush the refcount block cache: %s", 2588 strerror(-ret)); 2589 } 2590 2591 if (result == 0) { 2592 qcow2_mark_clean(bs); 2593 } 2594 2595 return result; 2596 } 2597 2598 static void qcow2_close(BlockDriverState *bs) 2599 { 2600 BDRVQcow2State *s = bs->opaque; 2601 qemu_vfree(s->l1_table); 2602 /* else pre-write overlap checks in cache_destroy may crash */ 2603 s->l1_table = NULL; 2604 2605 if (!(s->flags & BDRV_O_INACTIVE)) { 2606 qcow2_inactivate(bs); 2607 } 2608 2609 cache_clean_timer_del(bs); 2610 qcow2_cache_destroy(s->l2_table_cache); 2611 qcow2_cache_destroy(s->refcount_block_cache); 2612 2613 qcrypto_block_free(s->crypto); 2614 s->crypto = NULL; 2615 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts); 2616 2617 g_free(s->unknown_header_fields); 2618 cleanup_unknown_header_ext(bs); 2619 2620 g_free(s->image_data_file); 2621 g_free(s->image_backing_file); 2622 g_free(s->image_backing_format); 2623 2624 if (has_data_file(bs)) { 2625 bdrv_unref_child(bs, s->data_file); 2626 s->data_file = NULL; 2627 } 2628 2629 qcow2_refcount_close(bs); 2630 qcow2_free_snapshots(bs); 2631 } 2632 2633 static void coroutine_fn qcow2_co_invalidate_cache(BlockDriverState *bs, 2634 Error **errp) 2635 { 2636 BDRVQcow2State *s = bs->opaque; 2637 int flags = s->flags; 2638 QCryptoBlock *crypto = NULL; 2639 QDict *options; 2640 Error *local_err = NULL; 2641 int ret; 2642 2643 /* 2644 * Backing files are read-only which makes all of their metadata immutable, 2645 * that means we don't have to worry about reopening them here. 2646 */ 2647 2648 crypto = s->crypto; 2649 s->crypto = NULL; 2650 2651 qcow2_close(bs); 2652 2653 memset(s, 0, sizeof(BDRVQcow2State)); 2654 options = qdict_clone_shallow(bs->options); 2655 2656 flags &= ~BDRV_O_INACTIVE; 2657 qemu_co_mutex_lock(&s->lock); 2658 ret = qcow2_do_open(bs, options, flags, &local_err); 2659 qemu_co_mutex_unlock(&s->lock); 2660 qobject_unref(options); 2661 if (local_err) { 2662 error_propagate_prepend(errp, local_err, 2663 "Could not reopen qcow2 layer: "); 2664 bs->drv = NULL; 2665 return; 2666 } else if (ret < 0) { 2667 error_setg_errno(errp, -ret, "Could not reopen qcow2 layer"); 2668 bs->drv = NULL; 2669 return; 2670 } 2671 2672 s->crypto = crypto; 2673 } 2674 2675 static size_t header_ext_add(char *buf, uint32_t magic, const void *s, 2676 size_t len, size_t buflen) 2677 { 2678 QCowExtension *ext_backing_fmt = (QCowExtension*) buf; 2679 size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7); 2680 2681 if (buflen < ext_len) { 2682 return -ENOSPC; 2683 } 2684 2685 *ext_backing_fmt = (QCowExtension) { 2686 .magic = cpu_to_be32(magic), 2687 .len = cpu_to_be32(len), 2688 }; 2689 2690 if (len) { 2691 memcpy(buf + sizeof(QCowExtension), s, len); 2692 } 2693 2694 return ext_len; 2695 } 2696 2697 /* 2698 * Updates the qcow2 header, including the variable length parts of it, i.e. 2699 * the backing file name and all extensions. qcow2 was not designed to allow 2700 * such changes, so if we run out of space (we can only use the first cluster) 2701 * this function may fail. 2702 * 2703 * Returns 0 on success, -errno in error cases. 2704 */ 2705 int qcow2_update_header(BlockDriverState *bs) 2706 { 2707 BDRVQcow2State *s = bs->opaque; 2708 QCowHeader *header; 2709 char *buf; 2710 size_t buflen = s->cluster_size; 2711 int ret; 2712 uint64_t total_size; 2713 uint32_t refcount_table_clusters; 2714 size_t header_length; 2715 Qcow2UnknownHeaderExtension *uext; 2716 2717 buf = qemu_blockalign(bs, buflen); 2718 2719 /* Header structure */ 2720 header = (QCowHeader*) buf; 2721 2722 if (buflen < sizeof(*header)) { 2723 ret = -ENOSPC; 2724 goto fail; 2725 } 2726 2727 header_length = sizeof(*header) + s->unknown_header_fields_size; 2728 total_size = bs->total_sectors * BDRV_SECTOR_SIZE; 2729 refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3); 2730 2731 *header = (QCowHeader) { 2732 /* Version 2 fields */ 2733 .magic = cpu_to_be32(QCOW_MAGIC), 2734 .version = cpu_to_be32(s->qcow_version), 2735 .backing_file_offset = 0, 2736 .backing_file_size = 0, 2737 .cluster_bits = cpu_to_be32(s->cluster_bits), 2738 .size = cpu_to_be64(total_size), 2739 .crypt_method = cpu_to_be32(s->crypt_method_header), 2740 .l1_size = cpu_to_be32(s->l1_size), 2741 .l1_table_offset = cpu_to_be64(s->l1_table_offset), 2742 .refcount_table_offset = cpu_to_be64(s->refcount_table_offset), 2743 .refcount_table_clusters = cpu_to_be32(refcount_table_clusters), 2744 .nb_snapshots = cpu_to_be32(s->nb_snapshots), 2745 .snapshots_offset = cpu_to_be64(s->snapshots_offset), 2746 2747 /* Version 3 fields */ 2748 .incompatible_features = cpu_to_be64(s->incompatible_features), 2749 .compatible_features = cpu_to_be64(s->compatible_features), 2750 .autoclear_features = cpu_to_be64(s->autoclear_features), 2751 .refcount_order = cpu_to_be32(s->refcount_order), 2752 .header_length = cpu_to_be32(header_length), 2753 }; 2754 2755 /* For older versions, write a shorter header */ 2756 switch (s->qcow_version) { 2757 case 2: 2758 ret = offsetof(QCowHeader, incompatible_features); 2759 break; 2760 case 3: 2761 ret = sizeof(*header); 2762 break; 2763 default: 2764 ret = -EINVAL; 2765 goto fail; 2766 } 2767 2768 buf += ret; 2769 buflen -= ret; 2770 memset(buf, 0, buflen); 2771 2772 /* Preserve any unknown field in the header */ 2773 if (s->unknown_header_fields_size) { 2774 if (buflen < s->unknown_header_fields_size) { 2775 ret = -ENOSPC; 2776 goto fail; 2777 } 2778 2779 memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size); 2780 buf += s->unknown_header_fields_size; 2781 buflen -= s->unknown_header_fields_size; 2782 } 2783 2784 /* Backing file format header extension */ 2785 if (s->image_backing_format) { 2786 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT, 2787 s->image_backing_format, 2788 strlen(s->image_backing_format), 2789 buflen); 2790 if (ret < 0) { 2791 goto fail; 2792 } 2793 2794 buf += ret; 2795 buflen -= ret; 2796 } 2797 2798 /* External data file header extension */ 2799 if (has_data_file(bs) && s->image_data_file) { 2800 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_DATA_FILE, 2801 s->image_data_file, strlen(s->image_data_file), 2802 buflen); 2803 if (ret < 0) { 2804 goto fail; 2805 } 2806 2807 buf += ret; 2808 buflen -= ret; 2809 } 2810 2811 /* Full disk encryption header pointer extension */ 2812 if (s->crypto_header.offset != 0) { 2813 s->crypto_header.offset = cpu_to_be64(s->crypto_header.offset); 2814 s->crypto_header.length = cpu_to_be64(s->crypto_header.length); 2815 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_CRYPTO_HEADER, 2816 &s->crypto_header, sizeof(s->crypto_header), 2817 buflen); 2818 s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset); 2819 s->crypto_header.length = be64_to_cpu(s->crypto_header.length); 2820 if (ret < 0) { 2821 goto fail; 2822 } 2823 buf += ret; 2824 buflen -= ret; 2825 } 2826 2827 /* 2828 * Feature table. A mere 8 feature names occupies 392 bytes, and 2829 * when coupled with the v3 minimum header of 104 bytes plus the 2830 * 8-byte end-of-extension marker, that would leave only 8 bytes 2831 * for a backing file name in an image with 512-byte clusters. 2832 * Thus, we choose to omit this header for cluster sizes 4k and 2833 * smaller. 2834 */ 2835 if (s->qcow_version >= 3 && s->cluster_size > 4096) { 2836 static const Qcow2Feature features[] = { 2837 { 2838 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2839 .bit = QCOW2_INCOMPAT_DIRTY_BITNR, 2840 .name = "dirty bit", 2841 }, 2842 { 2843 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2844 .bit = QCOW2_INCOMPAT_CORRUPT_BITNR, 2845 .name = "corrupt bit", 2846 }, 2847 { 2848 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, 2849 .bit = QCOW2_INCOMPAT_DATA_FILE_BITNR, 2850 .name = "external data file", 2851 }, 2852 { 2853 .type = QCOW2_FEAT_TYPE_COMPATIBLE, 2854 .bit = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR, 2855 .name = "lazy refcounts", 2856 }, 2857 { 2858 .type = QCOW2_FEAT_TYPE_AUTOCLEAR, 2859 .bit = QCOW2_AUTOCLEAR_BITMAPS_BITNR, 2860 .name = "bitmaps", 2861 }, 2862 { 2863 .type = QCOW2_FEAT_TYPE_AUTOCLEAR, 2864 .bit = QCOW2_AUTOCLEAR_DATA_FILE_RAW_BITNR, 2865 .name = "raw external data", 2866 }, 2867 }; 2868 2869 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE, 2870 features, sizeof(features), buflen); 2871 if (ret < 0) { 2872 goto fail; 2873 } 2874 buf += ret; 2875 buflen -= ret; 2876 } 2877 2878 /* Bitmap extension */ 2879 if (s->nb_bitmaps > 0) { 2880 Qcow2BitmapHeaderExt bitmaps_header = { 2881 .nb_bitmaps = cpu_to_be32(s->nb_bitmaps), 2882 .bitmap_directory_size = 2883 cpu_to_be64(s->bitmap_directory_size), 2884 .bitmap_directory_offset = 2885 cpu_to_be64(s->bitmap_directory_offset) 2886 }; 2887 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BITMAPS, 2888 &bitmaps_header, sizeof(bitmaps_header), 2889 buflen); 2890 if (ret < 0) { 2891 goto fail; 2892 } 2893 buf += ret; 2894 buflen -= ret; 2895 } 2896 2897 /* Keep unknown header extensions */ 2898 QLIST_FOREACH(uext, &s->unknown_header_ext, next) { 2899 ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen); 2900 if (ret < 0) { 2901 goto fail; 2902 } 2903 2904 buf += ret; 2905 buflen -= ret; 2906 } 2907 2908 /* End of header extensions */ 2909 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen); 2910 if (ret < 0) { 2911 goto fail; 2912 } 2913 2914 buf += ret; 2915 buflen -= ret; 2916 2917 /* Backing file name */ 2918 if (s->image_backing_file) { 2919 size_t backing_file_len = strlen(s->image_backing_file); 2920 2921 if (buflen < backing_file_len) { 2922 ret = -ENOSPC; 2923 goto fail; 2924 } 2925 2926 /* Using strncpy is ok here, since buf is not NUL-terminated. */ 2927 strncpy(buf, s->image_backing_file, buflen); 2928 2929 header->backing_file_offset = cpu_to_be64(buf - ((char*) header)); 2930 header->backing_file_size = cpu_to_be32(backing_file_len); 2931 } 2932 2933 /* Write the new header */ 2934 ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size); 2935 if (ret < 0) { 2936 goto fail; 2937 } 2938 2939 ret = 0; 2940 fail: 2941 qemu_vfree(header); 2942 return ret; 2943 } 2944 2945 static int qcow2_change_backing_file(BlockDriverState *bs, 2946 const char *backing_file, const char *backing_fmt) 2947 { 2948 BDRVQcow2State *s = bs->opaque; 2949 2950 /* Adding a backing file means that the external data file alone won't be 2951 * enough to make sense of the content */ 2952 if (backing_file && data_file_is_raw(bs)) { 2953 return -EINVAL; 2954 } 2955 2956 if (backing_file && strlen(backing_file) > 1023) { 2957 return -EINVAL; 2958 } 2959 2960 pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file), 2961 backing_file ?: ""); 2962 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: ""); 2963 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: ""); 2964 2965 g_free(s->image_backing_file); 2966 g_free(s->image_backing_format); 2967 2968 s->image_backing_file = backing_file ? g_strdup(bs->backing_file) : NULL; 2969 s->image_backing_format = backing_fmt ? g_strdup(bs->backing_format) : NULL; 2970 2971 return qcow2_update_header(bs); 2972 } 2973 2974 static int qcow2_crypt_method_from_format(const char *encryptfmt) 2975 { 2976 if (g_str_equal(encryptfmt, "luks")) { 2977 return QCOW_CRYPT_LUKS; 2978 } else if (g_str_equal(encryptfmt, "aes")) { 2979 return QCOW_CRYPT_AES; 2980 } else { 2981 return -EINVAL; 2982 } 2983 } 2984 2985 static int qcow2_set_up_encryption(BlockDriverState *bs, 2986 QCryptoBlockCreateOptions *cryptoopts, 2987 Error **errp) 2988 { 2989 BDRVQcow2State *s = bs->opaque; 2990 QCryptoBlock *crypto = NULL; 2991 int fmt, ret; 2992 2993 switch (cryptoopts->format) { 2994 case Q_CRYPTO_BLOCK_FORMAT_LUKS: 2995 fmt = QCOW_CRYPT_LUKS; 2996 break; 2997 case Q_CRYPTO_BLOCK_FORMAT_QCOW: 2998 fmt = QCOW_CRYPT_AES; 2999 break; 3000 default: 3001 error_setg(errp, "Crypto format not supported in qcow2"); 3002 return -EINVAL; 3003 } 3004 3005 s->crypt_method_header = fmt; 3006 3007 crypto = qcrypto_block_create(cryptoopts, "encrypt.", 3008 qcow2_crypto_hdr_init_func, 3009 qcow2_crypto_hdr_write_func, 3010 bs, errp); 3011 if (!crypto) { 3012 return -EINVAL; 3013 } 3014 3015 ret = qcow2_update_header(bs); 3016 if (ret < 0) { 3017 error_setg_errno(errp, -ret, "Could not write encryption header"); 3018 goto out; 3019 } 3020 3021 ret = 0; 3022 out: 3023 qcrypto_block_free(crypto); 3024 return ret; 3025 } 3026 3027 /** 3028 * Preallocates metadata structures for data clusters between @offset (in the 3029 * guest disk) and @new_length (which is thus generally the new guest disk 3030 * size). 3031 * 3032 * Returns: 0 on success, -errno on failure. 3033 */ 3034 static int coroutine_fn preallocate_co(BlockDriverState *bs, uint64_t offset, 3035 uint64_t new_length, PreallocMode mode, 3036 Error **errp) 3037 { 3038 BDRVQcow2State *s = bs->opaque; 3039 uint64_t bytes; 3040 uint64_t host_offset = 0; 3041 int64_t file_length; 3042 unsigned int cur_bytes; 3043 int ret; 3044 QCowL2Meta *meta; 3045 3046 assert(offset <= new_length); 3047 bytes = new_length - offset; 3048 3049 while (bytes) { 3050 cur_bytes = MIN(bytes, QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size)); 3051 ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes, 3052 &host_offset, &meta); 3053 if (ret < 0) { 3054 error_setg_errno(errp, -ret, "Allocating clusters failed"); 3055 return ret; 3056 } 3057 3058 while (meta) { 3059 QCowL2Meta *next = meta->next; 3060 3061 ret = qcow2_alloc_cluster_link_l2(bs, meta); 3062 if (ret < 0) { 3063 error_setg_errno(errp, -ret, "Mapping clusters failed"); 3064 qcow2_free_any_clusters(bs, meta->alloc_offset, 3065 meta->nb_clusters, QCOW2_DISCARD_NEVER); 3066 return ret; 3067 } 3068 3069 /* There are no dependent requests, but we need to remove our 3070 * request from the list of in-flight requests */ 3071 QLIST_REMOVE(meta, next_in_flight); 3072 3073 g_free(meta); 3074 meta = next; 3075 } 3076 3077 /* TODO Preallocate data if requested */ 3078 3079 bytes -= cur_bytes; 3080 offset += cur_bytes; 3081 } 3082 3083 /* 3084 * It is expected that the image file is large enough to actually contain 3085 * all of the allocated clusters (otherwise we get failing reads after 3086 * EOF). Extend the image to the last allocated sector. 3087 */ 3088 file_length = bdrv_getlength(s->data_file->bs); 3089 if (file_length < 0) { 3090 error_setg_errno(errp, -file_length, "Could not get file size"); 3091 return file_length; 3092 } 3093 3094 if (host_offset + cur_bytes > file_length) { 3095 if (mode == PREALLOC_MODE_METADATA) { 3096 mode = PREALLOC_MODE_OFF; 3097 } 3098 ret = bdrv_co_truncate(s->data_file, host_offset + cur_bytes, false, 3099 mode, 0, errp); 3100 if (ret < 0) { 3101 return ret; 3102 } 3103 } 3104 3105 return 0; 3106 } 3107 3108 /* qcow2_refcount_metadata_size: 3109 * @clusters: number of clusters to refcount (including data and L1/L2 tables) 3110 * @cluster_size: size of a cluster, in bytes 3111 * @refcount_order: refcount bits power-of-2 exponent 3112 * @generous_increase: allow for the refcount table to be 1.5x as large as it 3113 * needs to be 3114 * 3115 * Returns: Number of bytes required for refcount blocks and table metadata. 3116 */ 3117 int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size, 3118 int refcount_order, bool generous_increase, 3119 uint64_t *refblock_count) 3120 { 3121 /* 3122 * Every host cluster is reference-counted, including metadata (even 3123 * refcount metadata is recursively included). 3124 * 3125 * An accurate formula for the size of refcount metadata size is difficult 3126 * to derive. An easier method of calculation is finding the fixed point 3127 * where no further refcount blocks or table clusters are required to 3128 * reference count every cluster. 3129 */ 3130 int64_t blocks_per_table_cluster = cluster_size / sizeof(uint64_t); 3131 int64_t refcounts_per_block = cluster_size * 8 / (1 << refcount_order); 3132 int64_t table = 0; /* number of refcount table clusters */ 3133 int64_t blocks = 0; /* number of refcount block clusters */ 3134 int64_t last; 3135 int64_t n = 0; 3136 3137 do { 3138 last = n; 3139 blocks = DIV_ROUND_UP(clusters + table + blocks, refcounts_per_block); 3140 table = DIV_ROUND_UP(blocks, blocks_per_table_cluster); 3141 n = clusters + blocks + table; 3142 3143 if (n == last && generous_increase) { 3144 clusters += DIV_ROUND_UP(table, 2); 3145 n = 0; /* force another loop */ 3146 generous_increase = false; 3147 } 3148 } while (n != last); 3149 3150 if (refblock_count) { 3151 *refblock_count = blocks; 3152 } 3153 3154 return (blocks + table) * cluster_size; 3155 } 3156 3157 /** 3158 * qcow2_calc_prealloc_size: 3159 * @total_size: virtual disk size in bytes 3160 * @cluster_size: cluster size in bytes 3161 * @refcount_order: refcount bits power-of-2 exponent 3162 * 3163 * Returns: Total number of bytes required for the fully allocated image 3164 * (including metadata). 3165 */ 3166 static int64_t qcow2_calc_prealloc_size(int64_t total_size, 3167 size_t cluster_size, 3168 int refcount_order) 3169 { 3170 int64_t meta_size = 0; 3171 uint64_t nl1e, nl2e; 3172 int64_t aligned_total_size = ROUND_UP(total_size, cluster_size); 3173 3174 /* header: 1 cluster */ 3175 meta_size += cluster_size; 3176 3177 /* total size of L2 tables */ 3178 nl2e = aligned_total_size / cluster_size; 3179 nl2e = ROUND_UP(nl2e, cluster_size / sizeof(uint64_t)); 3180 meta_size += nl2e * sizeof(uint64_t); 3181 3182 /* total size of L1 tables */ 3183 nl1e = nl2e * sizeof(uint64_t) / cluster_size; 3184 nl1e = ROUND_UP(nl1e, cluster_size / sizeof(uint64_t)); 3185 meta_size += nl1e * sizeof(uint64_t); 3186 3187 /* total size of refcount table and blocks */ 3188 meta_size += qcow2_refcount_metadata_size( 3189 (meta_size + aligned_total_size) / cluster_size, 3190 cluster_size, refcount_order, false, NULL); 3191 3192 return meta_size + aligned_total_size; 3193 } 3194 3195 static bool validate_cluster_size(size_t cluster_size, Error **errp) 3196 { 3197 int cluster_bits = ctz32(cluster_size); 3198 if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS || 3199 (1 << cluster_bits) != cluster_size) 3200 { 3201 error_setg(errp, "Cluster size must be a power of two between %d and " 3202 "%dk", 1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10)); 3203 return false; 3204 } 3205 return true; 3206 } 3207 3208 static size_t qcow2_opt_get_cluster_size_del(QemuOpts *opts, Error **errp) 3209 { 3210 size_t cluster_size; 3211 3212 cluster_size = qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE, 3213 DEFAULT_CLUSTER_SIZE); 3214 if (!validate_cluster_size(cluster_size, errp)) { 3215 return 0; 3216 } 3217 return cluster_size; 3218 } 3219 3220 static int qcow2_opt_get_version_del(QemuOpts *opts, Error **errp) 3221 { 3222 char *buf; 3223 int ret; 3224 3225 buf = qemu_opt_get_del(opts, BLOCK_OPT_COMPAT_LEVEL); 3226 if (!buf) { 3227 ret = 3; /* default */ 3228 } else if (!strcmp(buf, "0.10")) { 3229 ret = 2; 3230 } else if (!strcmp(buf, "1.1")) { 3231 ret = 3; 3232 } else { 3233 error_setg(errp, "Invalid compatibility level: '%s'", buf); 3234 ret = -EINVAL; 3235 } 3236 g_free(buf); 3237 return ret; 3238 } 3239 3240 static uint64_t qcow2_opt_get_refcount_bits_del(QemuOpts *opts, int version, 3241 Error **errp) 3242 { 3243 uint64_t refcount_bits; 3244 3245 refcount_bits = qemu_opt_get_number_del(opts, BLOCK_OPT_REFCOUNT_BITS, 16); 3246 if (refcount_bits > 64 || !is_power_of_2(refcount_bits)) { 3247 error_setg(errp, "Refcount width must be a power of two and may not " 3248 "exceed 64 bits"); 3249 return 0; 3250 } 3251 3252 if (version < 3 && refcount_bits != 16) { 3253 error_setg(errp, "Different refcount widths than 16 bits require " 3254 "compatibility level 1.1 or above (use compat=1.1 or " 3255 "greater)"); 3256 return 0; 3257 } 3258 3259 return refcount_bits; 3260 } 3261 3262 static int coroutine_fn 3263 qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) 3264 { 3265 BlockdevCreateOptionsQcow2 *qcow2_opts; 3266 QDict *options; 3267 3268 /* 3269 * Open the image file and write a minimal qcow2 header. 3270 * 3271 * We keep things simple and start with a zero-sized image. We also 3272 * do without refcount blocks or a L1 table for now. We'll fix the 3273 * inconsistency later. 3274 * 3275 * We do need a refcount table because growing the refcount table means 3276 * allocating two new refcount blocks - the second of which would be at 3277 * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file 3278 * size for any qcow2 image. 3279 */ 3280 BlockBackend *blk = NULL; 3281 BlockDriverState *bs = NULL; 3282 BlockDriverState *data_bs = NULL; 3283 QCowHeader *header; 3284 size_t cluster_size; 3285 int version; 3286 int refcount_order; 3287 uint64_t* refcount_table; 3288 Error *local_err = NULL; 3289 int ret; 3290 3291 assert(create_options->driver == BLOCKDEV_DRIVER_QCOW2); 3292 qcow2_opts = &create_options->u.qcow2; 3293 3294 bs = bdrv_open_blockdev_ref(qcow2_opts->file, errp); 3295 if (bs == NULL) { 3296 return -EIO; 3297 } 3298 3299 /* Validate options and set default values */ 3300 if (!QEMU_IS_ALIGNED(qcow2_opts->size, BDRV_SECTOR_SIZE)) { 3301 error_setg(errp, "Image size must be a multiple of %u bytes", 3302 (unsigned) BDRV_SECTOR_SIZE); 3303 ret = -EINVAL; 3304 goto out; 3305 } 3306 3307 if (qcow2_opts->has_version) { 3308 switch (qcow2_opts->version) { 3309 case BLOCKDEV_QCOW2_VERSION_V2: 3310 version = 2; 3311 break; 3312 case BLOCKDEV_QCOW2_VERSION_V3: 3313 version = 3; 3314 break; 3315 default: 3316 g_assert_not_reached(); 3317 } 3318 } else { 3319 version = 3; 3320 } 3321 3322 if (qcow2_opts->has_cluster_size) { 3323 cluster_size = qcow2_opts->cluster_size; 3324 } else { 3325 cluster_size = DEFAULT_CLUSTER_SIZE; 3326 } 3327 3328 if (!validate_cluster_size(cluster_size, errp)) { 3329 ret = -EINVAL; 3330 goto out; 3331 } 3332 3333 if (!qcow2_opts->has_preallocation) { 3334 qcow2_opts->preallocation = PREALLOC_MODE_OFF; 3335 } 3336 if (qcow2_opts->has_backing_file && 3337 qcow2_opts->preallocation != PREALLOC_MODE_OFF) 3338 { 3339 error_setg(errp, "Backing file and preallocation cannot be used at " 3340 "the same time"); 3341 ret = -EINVAL; 3342 goto out; 3343 } 3344 if (qcow2_opts->has_backing_fmt && !qcow2_opts->has_backing_file) { 3345 error_setg(errp, "Backing format cannot be used without backing file"); 3346 ret = -EINVAL; 3347 goto out; 3348 } 3349 3350 if (!qcow2_opts->has_lazy_refcounts) { 3351 qcow2_opts->lazy_refcounts = false; 3352 } 3353 if (version < 3 && qcow2_opts->lazy_refcounts) { 3354 error_setg(errp, "Lazy refcounts only supported with compatibility " 3355 "level 1.1 and above (use version=v3 or greater)"); 3356 ret = -EINVAL; 3357 goto out; 3358 } 3359 3360 if (!qcow2_opts->has_refcount_bits) { 3361 qcow2_opts->refcount_bits = 16; 3362 } 3363 if (qcow2_opts->refcount_bits > 64 || 3364 !is_power_of_2(qcow2_opts->refcount_bits)) 3365 { 3366 error_setg(errp, "Refcount width must be a power of two and may not " 3367 "exceed 64 bits"); 3368 ret = -EINVAL; 3369 goto out; 3370 } 3371 if (version < 3 && qcow2_opts->refcount_bits != 16) { 3372 error_setg(errp, "Different refcount widths than 16 bits require " 3373 "compatibility level 1.1 or above (use version=v3 or " 3374 "greater)"); 3375 ret = -EINVAL; 3376 goto out; 3377 } 3378 refcount_order = ctz32(qcow2_opts->refcount_bits); 3379 3380 if (qcow2_opts->data_file_raw && !qcow2_opts->data_file) { 3381 error_setg(errp, "data-file-raw requires data-file"); 3382 ret = -EINVAL; 3383 goto out; 3384 } 3385 if (qcow2_opts->data_file_raw && qcow2_opts->has_backing_file) { 3386 error_setg(errp, "Backing file and data-file-raw cannot be used at " 3387 "the same time"); 3388 ret = -EINVAL; 3389 goto out; 3390 } 3391 3392 if (qcow2_opts->data_file) { 3393 if (version < 3) { 3394 error_setg(errp, "External data files are only supported with " 3395 "compatibility level 1.1 and above (use version=v3 or " 3396 "greater)"); 3397 ret = -EINVAL; 3398 goto out; 3399 } 3400 data_bs = bdrv_open_blockdev_ref(qcow2_opts->data_file, errp); 3401 if (data_bs == NULL) { 3402 ret = -EIO; 3403 goto out; 3404 } 3405 } 3406 3407 /* Create BlockBackend to write to the image */ 3408 blk = blk_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL, 3409 errp); 3410 if (!blk) { 3411 ret = -EPERM; 3412 goto out; 3413 } 3414 blk_set_allow_write_beyond_eof(blk, true); 3415 3416 /* Write the header */ 3417 QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS) < sizeof(*header)); 3418 header = g_malloc0(cluster_size); 3419 *header = (QCowHeader) { 3420 .magic = cpu_to_be32(QCOW_MAGIC), 3421 .version = cpu_to_be32(version), 3422 .cluster_bits = cpu_to_be32(ctz32(cluster_size)), 3423 .size = cpu_to_be64(0), 3424 .l1_table_offset = cpu_to_be64(0), 3425 .l1_size = cpu_to_be32(0), 3426 .refcount_table_offset = cpu_to_be64(cluster_size), 3427 .refcount_table_clusters = cpu_to_be32(1), 3428 .refcount_order = cpu_to_be32(refcount_order), 3429 .header_length = cpu_to_be32(sizeof(*header)), 3430 }; 3431 3432 /* We'll update this to correct value later */ 3433 header->crypt_method = cpu_to_be32(QCOW_CRYPT_NONE); 3434 3435 if (qcow2_opts->lazy_refcounts) { 3436 header->compatible_features |= 3437 cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS); 3438 } 3439 if (data_bs) { 3440 header->incompatible_features |= 3441 cpu_to_be64(QCOW2_INCOMPAT_DATA_FILE); 3442 } 3443 if (qcow2_opts->data_file_raw) { 3444 header->autoclear_features |= 3445 cpu_to_be64(QCOW2_AUTOCLEAR_DATA_FILE_RAW); 3446 } 3447 3448 ret = blk_pwrite(blk, 0, header, cluster_size, 0); 3449 g_free(header); 3450 if (ret < 0) { 3451 error_setg_errno(errp, -ret, "Could not write qcow2 header"); 3452 goto out; 3453 } 3454 3455 /* Write a refcount table with one refcount block */ 3456 refcount_table = g_malloc0(2 * cluster_size); 3457 refcount_table[0] = cpu_to_be64(2 * cluster_size); 3458 ret = blk_pwrite(blk, cluster_size, refcount_table, 2 * cluster_size, 0); 3459 g_free(refcount_table); 3460 3461 if (ret < 0) { 3462 error_setg_errno(errp, -ret, "Could not write refcount table"); 3463 goto out; 3464 } 3465 3466 blk_unref(blk); 3467 blk = NULL; 3468 3469 /* 3470 * And now open the image and make it consistent first (i.e. increase the 3471 * refcount of the cluster that is occupied by the header and the refcount 3472 * table) 3473 */ 3474 options = qdict_new(); 3475 qdict_put_str(options, "driver", "qcow2"); 3476 qdict_put_str(options, "file", bs->node_name); 3477 if (data_bs) { 3478 qdict_put_str(options, "data-file", data_bs->node_name); 3479 } 3480 blk = blk_new_open(NULL, NULL, options, 3481 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_NO_FLUSH, 3482 &local_err); 3483 if (blk == NULL) { 3484 error_propagate(errp, local_err); 3485 ret = -EIO; 3486 goto out; 3487 } 3488 3489 ret = qcow2_alloc_clusters(blk_bs(blk), 3 * cluster_size); 3490 if (ret < 0) { 3491 error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 " 3492 "header and refcount table"); 3493 goto out; 3494 3495 } else if (ret != 0) { 3496 error_report("Huh, first cluster in empty image is already in use?"); 3497 abort(); 3498 } 3499 3500 /* Set the external data file if necessary */ 3501 if (data_bs) { 3502 BDRVQcow2State *s = blk_bs(blk)->opaque; 3503 s->image_data_file = g_strdup(data_bs->filename); 3504 } 3505 3506 /* Create a full header (including things like feature table) */ 3507 ret = qcow2_update_header(blk_bs(blk)); 3508 if (ret < 0) { 3509 error_setg_errno(errp, -ret, "Could not update qcow2 header"); 3510 goto out; 3511 } 3512 3513 /* Okay, now that we have a valid image, let's give it the right size */ 3514 ret = blk_truncate(blk, qcow2_opts->size, false, qcow2_opts->preallocation, 3515 0, errp); 3516 if (ret < 0) { 3517 error_prepend(errp, "Could not resize image: "); 3518 goto out; 3519 } 3520 3521 /* Want a backing file? There you go. */ 3522 if (qcow2_opts->has_backing_file) { 3523 const char *backing_format = NULL; 3524 3525 if (qcow2_opts->has_backing_fmt) { 3526 backing_format = BlockdevDriver_str(qcow2_opts->backing_fmt); 3527 } 3528 3529 ret = bdrv_change_backing_file(blk_bs(blk), qcow2_opts->backing_file, 3530 backing_format); 3531 if (ret < 0) { 3532 error_setg_errno(errp, -ret, "Could not assign backing file '%s' " 3533 "with format '%s'", qcow2_opts->backing_file, 3534 backing_format); 3535 goto out; 3536 } 3537 } 3538 3539 /* Want encryption? There you go. */ 3540 if (qcow2_opts->has_encrypt) { 3541 ret = qcow2_set_up_encryption(blk_bs(blk), qcow2_opts->encrypt, errp); 3542 if (ret < 0) { 3543 goto out; 3544 } 3545 } 3546 3547 blk_unref(blk); 3548 blk = NULL; 3549 3550 /* Reopen the image without BDRV_O_NO_FLUSH to flush it before returning. 3551 * Using BDRV_O_NO_IO, since encryption is now setup we don't want to 3552 * have to setup decryption context. We're not doing any I/O on the top 3553 * level BlockDriverState, only lower layers, where BDRV_O_NO_IO does 3554 * not have effect. 3555 */ 3556 options = qdict_new(); 3557 qdict_put_str(options, "driver", "qcow2"); 3558 qdict_put_str(options, "file", bs->node_name); 3559 if (data_bs) { 3560 qdict_put_str(options, "data-file", data_bs->node_name); 3561 } 3562 blk = blk_new_open(NULL, NULL, options, 3563 BDRV_O_RDWR | BDRV_O_NO_BACKING | BDRV_O_NO_IO, 3564 &local_err); 3565 if (blk == NULL) { 3566 error_propagate(errp, local_err); 3567 ret = -EIO; 3568 goto out; 3569 } 3570 3571 ret = 0; 3572 out: 3573 blk_unref(blk); 3574 bdrv_unref(bs); 3575 bdrv_unref(data_bs); 3576 return ret; 3577 } 3578 3579 static int coroutine_fn qcow2_co_create_opts(BlockDriver *drv, 3580 const char *filename, 3581 QemuOpts *opts, 3582 Error **errp) 3583 { 3584 BlockdevCreateOptions *create_options = NULL; 3585 QDict *qdict; 3586 Visitor *v; 3587 BlockDriverState *bs = NULL; 3588 BlockDriverState *data_bs = NULL; 3589 Error *local_err = NULL; 3590 const char *val; 3591 int ret; 3592 3593 /* Only the keyval visitor supports the dotted syntax needed for 3594 * encryption, so go through a QDict before getting a QAPI type. Ignore 3595 * options meant for the protocol layer so that the visitor doesn't 3596 * complain. */ 3597 qdict = qemu_opts_to_qdict_filtered(opts, NULL, bdrv_qcow2.create_opts, 3598 true); 3599 3600 /* Handle encryption options */ 3601 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT); 3602 if (val && !strcmp(val, "on")) { 3603 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT, "qcow"); 3604 } else if (val && !strcmp(val, "off")) { 3605 qdict_del(qdict, BLOCK_OPT_ENCRYPT); 3606 } 3607 3608 val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT); 3609 if (val && !strcmp(val, "aes")) { 3610 qdict_put_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT, "qcow"); 3611 } 3612 3613 /* Convert compat=0.10/1.1 into compat=v2/v3, to be renamed into 3614 * version=v2/v3 below. */ 3615 val = qdict_get_try_str(qdict, BLOCK_OPT_COMPAT_LEVEL); 3616 if (val && !strcmp(val, "0.10")) { 3617 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v2"); 3618 } else if (val && !strcmp(val, "1.1")) { 3619 qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v3"); 3620 } 3621 3622 /* Change legacy command line options into QMP ones */ 3623 static const QDictRenames opt_renames[] = { 3624 { BLOCK_OPT_BACKING_FILE, "backing-file" }, 3625 { BLOCK_OPT_BACKING_FMT, "backing-fmt" }, 3626 { BLOCK_OPT_CLUSTER_SIZE, "cluster-size" }, 3627 { BLOCK_OPT_LAZY_REFCOUNTS, "lazy-refcounts" }, 3628 { BLOCK_OPT_REFCOUNT_BITS, "refcount-bits" }, 3629 { BLOCK_OPT_ENCRYPT, BLOCK_OPT_ENCRYPT_FORMAT }, 3630 { BLOCK_OPT_COMPAT_LEVEL, "version" }, 3631 { BLOCK_OPT_DATA_FILE_RAW, "data-file-raw" }, 3632 { NULL, NULL }, 3633 }; 3634 3635 if (!qdict_rename_keys(qdict, opt_renames, errp)) { 3636 ret = -EINVAL; 3637 goto finish; 3638 } 3639 3640 /* Create and open the file (protocol layer) */ 3641 ret = bdrv_create_file(filename, opts, errp); 3642 if (ret < 0) { 3643 goto finish; 3644 } 3645 3646 bs = bdrv_open(filename, NULL, NULL, 3647 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp); 3648 if (bs == NULL) { 3649 ret = -EIO; 3650 goto finish; 3651 } 3652 3653 /* Create and open an external data file (protocol layer) */ 3654 val = qdict_get_try_str(qdict, BLOCK_OPT_DATA_FILE); 3655 if (val) { 3656 ret = bdrv_create_file(val, opts, errp); 3657 if (ret < 0) { 3658 goto finish; 3659 } 3660 3661 data_bs = bdrv_open(val, NULL, NULL, 3662 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, 3663 errp); 3664 if (data_bs == NULL) { 3665 ret = -EIO; 3666 goto finish; 3667 } 3668 3669 qdict_del(qdict, BLOCK_OPT_DATA_FILE); 3670 qdict_put_str(qdict, "data-file", data_bs->node_name); 3671 } 3672 3673 /* Set 'driver' and 'node' options */ 3674 qdict_put_str(qdict, "driver", "qcow2"); 3675 qdict_put_str(qdict, "file", bs->node_name); 3676 3677 /* Now get the QAPI type BlockdevCreateOptions */ 3678 v = qobject_input_visitor_new_flat_confused(qdict, errp); 3679 if (!v) { 3680 ret = -EINVAL; 3681 goto finish; 3682 } 3683 3684 visit_type_BlockdevCreateOptions(v, NULL, &create_options, &local_err); 3685 visit_free(v); 3686 3687 if (local_err) { 3688 error_propagate(errp, local_err); 3689 ret = -EINVAL; 3690 goto finish; 3691 } 3692 3693 /* Silently round up size */ 3694 create_options->u.qcow2.size = ROUND_UP(create_options->u.qcow2.size, 3695 BDRV_SECTOR_SIZE); 3696 3697 /* Create the qcow2 image (format layer) */ 3698 ret = qcow2_co_create(create_options, errp); 3699 if (ret < 0) { 3700 goto finish; 3701 } 3702 3703 ret = 0; 3704 finish: 3705 qobject_unref(qdict); 3706 bdrv_unref(bs); 3707 bdrv_unref(data_bs); 3708 qapi_free_BlockdevCreateOptions(create_options); 3709 return ret; 3710 } 3711 3712 3713 static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes) 3714 { 3715 int64_t nr; 3716 int res; 3717 3718 /* Clamp to image length, before checking status of underlying sectors */ 3719 if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) { 3720 bytes = bs->total_sectors * BDRV_SECTOR_SIZE - offset; 3721 } 3722 3723 if (!bytes) { 3724 return true; 3725 } 3726 res = bdrv_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL); 3727 return res >= 0 && (res & BDRV_BLOCK_ZERO) && nr == bytes; 3728 } 3729 3730 static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs, 3731 int64_t offset, int bytes, BdrvRequestFlags flags) 3732 { 3733 int ret; 3734 BDRVQcow2State *s = bs->opaque; 3735 3736 uint32_t head = offset % s->cluster_size; 3737 uint32_t tail = (offset + bytes) % s->cluster_size; 3738 3739 trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, bytes); 3740 if (offset + bytes == bs->total_sectors * BDRV_SECTOR_SIZE) { 3741 tail = 0; 3742 } 3743 3744 if (head || tail) { 3745 uint64_t off; 3746 unsigned int nr; 3747 3748 assert(head + bytes <= s->cluster_size); 3749 3750 /* check whether remainder of cluster already reads as zero */ 3751 if (!(is_zero(bs, offset - head, head) && 3752 is_zero(bs, offset + bytes, 3753 tail ? s->cluster_size - tail : 0))) { 3754 return -ENOTSUP; 3755 } 3756 3757 qemu_co_mutex_lock(&s->lock); 3758 /* We can have new write after previous check */ 3759 offset = QEMU_ALIGN_DOWN(offset, s->cluster_size); 3760 bytes = s->cluster_size; 3761 nr = s->cluster_size; 3762 ret = qcow2_get_cluster_offset(bs, offset, &nr, &off); 3763 if (ret != QCOW2_CLUSTER_UNALLOCATED && 3764 ret != QCOW2_CLUSTER_ZERO_PLAIN && 3765 ret != QCOW2_CLUSTER_ZERO_ALLOC) { 3766 qemu_co_mutex_unlock(&s->lock); 3767 return -ENOTSUP; 3768 } 3769 } else { 3770 qemu_co_mutex_lock(&s->lock); 3771 } 3772 3773 trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, bytes); 3774 3775 /* Whatever is left can use real zero clusters */ 3776 ret = qcow2_cluster_zeroize(bs, offset, bytes, flags); 3777 qemu_co_mutex_unlock(&s->lock); 3778 3779 return ret; 3780 } 3781 3782 static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs, 3783 int64_t offset, int bytes) 3784 { 3785 int ret; 3786 BDRVQcow2State *s = bs->opaque; 3787 3788 /* If the image does not support QCOW_OFLAG_ZERO then discarding 3789 * clusters could expose stale data from the backing file. */ 3790 if (s->qcow_version < 3 && bs->backing) { 3791 return -ENOTSUP; 3792 } 3793 3794 if (!QEMU_IS_ALIGNED(offset | bytes, s->cluster_size)) { 3795 assert(bytes < s->cluster_size); 3796 /* Ignore partial clusters, except for the special case of the 3797 * complete partial cluster at the end of an unaligned file */ 3798 if (!QEMU_IS_ALIGNED(offset, s->cluster_size) || 3799 offset + bytes != bs->total_sectors * BDRV_SECTOR_SIZE) { 3800 return -ENOTSUP; 3801 } 3802 } 3803 3804 qemu_co_mutex_lock(&s->lock); 3805 ret = qcow2_cluster_discard(bs, offset, bytes, QCOW2_DISCARD_REQUEST, 3806 false); 3807 qemu_co_mutex_unlock(&s->lock); 3808 return ret; 3809 } 3810 3811 static int coroutine_fn 3812 qcow2_co_copy_range_from(BlockDriverState *bs, 3813 BdrvChild *src, uint64_t src_offset, 3814 BdrvChild *dst, uint64_t dst_offset, 3815 uint64_t bytes, BdrvRequestFlags read_flags, 3816 BdrvRequestFlags write_flags) 3817 { 3818 BDRVQcow2State *s = bs->opaque; 3819 int ret; 3820 unsigned int cur_bytes; /* number of bytes in current iteration */ 3821 BdrvChild *child = NULL; 3822 BdrvRequestFlags cur_write_flags; 3823 3824 assert(!bs->encrypted); 3825 qemu_co_mutex_lock(&s->lock); 3826 3827 while (bytes != 0) { 3828 uint64_t copy_offset = 0; 3829 /* prepare next request */ 3830 cur_bytes = MIN(bytes, INT_MAX); 3831 cur_write_flags = write_flags; 3832 3833 ret = qcow2_get_cluster_offset(bs, src_offset, &cur_bytes, ©_offset); 3834 if (ret < 0) { 3835 goto out; 3836 } 3837 3838 switch (ret) { 3839 case QCOW2_CLUSTER_UNALLOCATED: 3840 if (bs->backing && bs->backing->bs) { 3841 int64_t backing_length = bdrv_getlength(bs->backing->bs); 3842 if (src_offset >= backing_length) { 3843 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 3844 } else { 3845 child = bs->backing; 3846 cur_bytes = MIN(cur_bytes, backing_length - src_offset); 3847 copy_offset = src_offset; 3848 } 3849 } else { 3850 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 3851 } 3852 break; 3853 3854 case QCOW2_CLUSTER_ZERO_PLAIN: 3855 case QCOW2_CLUSTER_ZERO_ALLOC: 3856 cur_write_flags |= BDRV_REQ_ZERO_WRITE; 3857 break; 3858 3859 case QCOW2_CLUSTER_COMPRESSED: 3860 ret = -ENOTSUP; 3861 goto out; 3862 3863 case QCOW2_CLUSTER_NORMAL: 3864 child = s->data_file; 3865 copy_offset += offset_into_cluster(s, src_offset); 3866 break; 3867 3868 default: 3869 abort(); 3870 } 3871 qemu_co_mutex_unlock(&s->lock); 3872 ret = bdrv_co_copy_range_from(child, 3873 copy_offset, 3874 dst, dst_offset, 3875 cur_bytes, read_flags, cur_write_flags); 3876 qemu_co_mutex_lock(&s->lock); 3877 if (ret < 0) { 3878 goto out; 3879 } 3880 3881 bytes -= cur_bytes; 3882 src_offset += cur_bytes; 3883 dst_offset += cur_bytes; 3884 } 3885 ret = 0; 3886 3887 out: 3888 qemu_co_mutex_unlock(&s->lock); 3889 return ret; 3890 } 3891 3892 static int coroutine_fn 3893 qcow2_co_copy_range_to(BlockDriverState *bs, 3894 BdrvChild *src, uint64_t src_offset, 3895 BdrvChild *dst, uint64_t dst_offset, 3896 uint64_t bytes, BdrvRequestFlags read_flags, 3897 BdrvRequestFlags write_flags) 3898 { 3899 BDRVQcow2State *s = bs->opaque; 3900 int offset_in_cluster; 3901 int ret; 3902 unsigned int cur_bytes; /* number of sectors in current iteration */ 3903 uint64_t cluster_offset; 3904 QCowL2Meta *l2meta = NULL; 3905 3906 assert(!bs->encrypted); 3907 3908 qemu_co_mutex_lock(&s->lock); 3909 3910 while (bytes != 0) { 3911 3912 l2meta = NULL; 3913 3914 offset_in_cluster = offset_into_cluster(s, dst_offset); 3915 cur_bytes = MIN(bytes, INT_MAX); 3916 3917 /* TODO: 3918 * If src->bs == dst->bs, we could simply copy by incrementing 3919 * the refcnt, without copying user data. 3920 * Or if src->bs == dst->bs->backing->bs, we could copy by discarding. */ 3921 ret = qcow2_alloc_cluster_offset(bs, dst_offset, &cur_bytes, 3922 &cluster_offset, &l2meta); 3923 if (ret < 0) { 3924 goto fail; 3925 } 3926 3927 assert(offset_into_cluster(s, cluster_offset) == 0); 3928 3929 ret = qcow2_pre_write_overlap_check(bs, 0, 3930 cluster_offset + offset_in_cluster, cur_bytes, true); 3931 if (ret < 0) { 3932 goto fail; 3933 } 3934 3935 qemu_co_mutex_unlock(&s->lock); 3936 ret = bdrv_co_copy_range_to(src, src_offset, 3937 s->data_file, 3938 cluster_offset + offset_in_cluster, 3939 cur_bytes, read_flags, write_flags); 3940 qemu_co_mutex_lock(&s->lock); 3941 if (ret < 0) { 3942 goto fail; 3943 } 3944 3945 ret = qcow2_handle_l2meta(bs, &l2meta, true); 3946 if (ret) { 3947 goto fail; 3948 } 3949 3950 bytes -= cur_bytes; 3951 src_offset += cur_bytes; 3952 dst_offset += cur_bytes; 3953 } 3954 ret = 0; 3955 3956 fail: 3957 qcow2_handle_l2meta(bs, &l2meta, false); 3958 3959 qemu_co_mutex_unlock(&s->lock); 3960 3961 trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); 3962 3963 return ret; 3964 } 3965 3966 static int coroutine_fn qcow2_co_truncate(BlockDriverState *bs, int64_t offset, 3967 bool exact, PreallocMode prealloc, 3968 BdrvRequestFlags flags, Error **errp) 3969 { 3970 BDRVQcow2State *s = bs->opaque; 3971 uint64_t old_length; 3972 int64_t new_l1_size; 3973 int ret; 3974 QDict *options; 3975 3976 if (prealloc != PREALLOC_MODE_OFF && prealloc != PREALLOC_MODE_METADATA && 3977 prealloc != PREALLOC_MODE_FALLOC && prealloc != PREALLOC_MODE_FULL) 3978 { 3979 error_setg(errp, "Unsupported preallocation mode '%s'", 3980 PreallocMode_str(prealloc)); 3981 return -ENOTSUP; 3982 } 3983 3984 if (!QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)) { 3985 error_setg(errp, "The new size must be a multiple of %u", 3986 (unsigned) BDRV_SECTOR_SIZE); 3987 return -EINVAL; 3988 } 3989 3990 qemu_co_mutex_lock(&s->lock); 3991 3992 /* 3993 * Even though we store snapshot size for all images, it was not 3994 * required until v3, so it is not safe to proceed for v2. 3995 */ 3996 if (s->nb_snapshots && s->qcow_version < 3) { 3997 error_setg(errp, "Can't resize a v2 image which has snapshots"); 3998 ret = -ENOTSUP; 3999 goto fail; 4000 } 4001 4002 /* See qcow2-bitmap.c for which bitmap scenarios prevent a resize. */ 4003 if (qcow2_truncate_bitmaps_check(bs, errp)) { 4004 ret = -ENOTSUP; 4005 goto fail; 4006 } 4007 4008 old_length = bs->total_sectors * BDRV_SECTOR_SIZE; 4009 new_l1_size = size_to_l1(s, offset); 4010 4011 if (offset < old_length) { 4012 int64_t last_cluster, old_file_size; 4013 if (prealloc != PREALLOC_MODE_OFF) { 4014 error_setg(errp, 4015 "Preallocation can't be used for shrinking an image"); 4016 ret = -EINVAL; 4017 goto fail; 4018 } 4019 4020 ret = qcow2_cluster_discard(bs, ROUND_UP(offset, s->cluster_size), 4021 old_length - ROUND_UP(offset, 4022 s->cluster_size), 4023 QCOW2_DISCARD_ALWAYS, true); 4024 if (ret < 0) { 4025 error_setg_errno(errp, -ret, "Failed to discard cropped clusters"); 4026 goto fail; 4027 } 4028 4029 ret = qcow2_shrink_l1_table(bs, new_l1_size); 4030 if (ret < 0) { 4031 error_setg_errno(errp, -ret, 4032 "Failed to reduce the number of L2 tables"); 4033 goto fail; 4034 } 4035 4036 ret = qcow2_shrink_reftable(bs); 4037 if (ret < 0) { 4038 error_setg_errno(errp, -ret, 4039 "Failed to discard unused refblocks"); 4040 goto fail; 4041 } 4042 4043 old_file_size = bdrv_getlength(bs->file->bs); 4044 if (old_file_size < 0) { 4045 error_setg_errno(errp, -old_file_size, 4046 "Failed to inquire current file length"); 4047 ret = old_file_size; 4048 goto fail; 4049 } 4050 last_cluster = qcow2_get_last_cluster(bs, old_file_size); 4051 if (last_cluster < 0) { 4052 error_setg_errno(errp, -last_cluster, 4053 "Failed to find the last cluster"); 4054 ret = last_cluster; 4055 goto fail; 4056 } 4057 if ((last_cluster + 1) * s->cluster_size < old_file_size) { 4058 Error *local_err = NULL; 4059 4060 /* 4061 * Do not pass @exact here: It will not help the user if 4062 * we get an error here just because they wanted to shrink 4063 * their qcow2 image (on a block device) with qemu-img. 4064 * (And on the qcow2 layer, the @exact requirement is 4065 * always fulfilled, so there is no need to pass it on.) 4066 */ 4067 bdrv_co_truncate(bs->file, (last_cluster + 1) * s->cluster_size, 4068 false, PREALLOC_MODE_OFF, 0, &local_err); 4069 if (local_err) { 4070 warn_reportf_err(local_err, 4071 "Failed to truncate the tail of the image: "); 4072 } 4073 } 4074 } else { 4075 ret = qcow2_grow_l1_table(bs, new_l1_size, true); 4076 if (ret < 0) { 4077 error_setg_errno(errp, -ret, "Failed to grow the L1 table"); 4078 goto fail; 4079 } 4080 } 4081 4082 switch (prealloc) { 4083 case PREALLOC_MODE_OFF: 4084 if (has_data_file(bs)) { 4085 /* 4086 * If the caller wants an exact resize, the external data 4087 * file should be resized to the exact target size, too, 4088 * so we pass @exact here. 4089 */ 4090 ret = bdrv_co_truncate(s->data_file, offset, exact, prealloc, 0, 4091 errp); 4092 if (ret < 0) { 4093 goto fail; 4094 } 4095 } 4096 break; 4097 4098 case PREALLOC_MODE_METADATA: 4099 ret = preallocate_co(bs, old_length, offset, prealloc, errp); 4100 if (ret < 0) { 4101 goto fail; 4102 } 4103 break; 4104 4105 case PREALLOC_MODE_FALLOC: 4106 case PREALLOC_MODE_FULL: 4107 { 4108 int64_t allocation_start, host_offset, guest_offset; 4109 int64_t clusters_allocated; 4110 int64_t old_file_size, last_cluster, new_file_size; 4111 uint64_t nb_new_data_clusters, nb_new_l2_tables; 4112 4113 /* With a data file, preallocation means just allocating the metadata 4114 * and forwarding the truncate request to the data file */ 4115 if (has_data_file(bs)) { 4116 ret = preallocate_co(bs, old_length, offset, prealloc, errp); 4117 if (ret < 0) { 4118 goto fail; 4119 } 4120 break; 4121 } 4122 4123 old_file_size = bdrv_getlength(bs->file->bs); 4124 if (old_file_size < 0) { 4125 error_setg_errno(errp, -old_file_size, 4126 "Failed to inquire current file length"); 4127 ret = old_file_size; 4128 goto fail; 4129 } 4130 4131 last_cluster = qcow2_get_last_cluster(bs, old_file_size); 4132 if (last_cluster >= 0) { 4133 old_file_size = (last_cluster + 1) * s->cluster_size; 4134 } else { 4135 old_file_size = ROUND_UP(old_file_size, s->cluster_size); 4136 } 4137 4138 nb_new_data_clusters = DIV_ROUND_UP(offset - old_length, 4139 s->cluster_size); 4140 4141 /* This is an overestimation; we will not actually allocate space for 4142 * these in the file but just make sure the new refcount structures are 4143 * able to cover them so we will not have to allocate new refblocks 4144 * while entering the data blocks in the potentially new L2 tables. 4145 * (We do not actually care where the L2 tables are placed. Maybe they 4146 * are already allocated or they can be placed somewhere before 4147 * @old_file_size. It does not matter because they will be fully 4148 * allocated automatically, so they do not need to be covered by the 4149 * preallocation. All that matters is that we will not have to allocate 4150 * new refcount structures for them.) */ 4151 nb_new_l2_tables = DIV_ROUND_UP(nb_new_data_clusters, 4152 s->cluster_size / sizeof(uint64_t)); 4153 /* The cluster range may not be aligned to L2 boundaries, so add one L2 4154 * table for a potential head/tail */ 4155 nb_new_l2_tables++; 4156 4157 allocation_start = qcow2_refcount_area(bs, old_file_size, 4158 nb_new_data_clusters + 4159 nb_new_l2_tables, 4160 true, 0, 0); 4161 if (allocation_start < 0) { 4162 error_setg_errno(errp, -allocation_start, 4163 "Failed to resize refcount structures"); 4164 ret = allocation_start; 4165 goto fail; 4166 } 4167 4168 clusters_allocated = qcow2_alloc_clusters_at(bs, allocation_start, 4169 nb_new_data_clusters); 4170 if (clusters_allocated < 0) { 4171 error_setg_errno(errp, -clusters_allocated, 4172 "Failed to allocate data clusters"); 4173 ret = clusters_allocated; 4174 goto fail; 4175 } 4176 4177 assert(clusters_allocated == nb_new_data_clusters); 4178 4179 /* Allocate the data area */ 4180 new_file_size = allocation_start + 4181 nb_new_data_clusters * s->cluster_size; 4182 /* 4183 * Image file grows, so @exact does not matter. 4184 * 4185 * If we need to zero out the new area, try first whether the protocol 4186 * driver can already take care of this. 4187 */ 4188 if (flags & BDRV_REQ_ZERO_WRITE) { 4189 ret = bdrv_co_truncate(bs->file, new_file_size, false, prealloc, 4190 BDRV_REQ_ZERO_WRITE, NULL); 4191 if (ret >= 0) { 4192 flags &= ~BDRV_REQ_ZERO_WRITE; 4193 } 4194 } else { 4195 ret = -1; 4196 } 4197 if (ret < 0) { 4198 ret = bdrv_co_truncate(bs->file, new_file_size, false, prealloc, 0, 4199 errp); 4200 } 4201 if (ret < 0) { 4202 error_prepend(errp, "Failed to resize underlying file: "); 4203 qcow2_free_clusters(bs, allocation_start, 4204 nb_new_data_clusters * s->cluster_size, 4205 QCOW2_DISCARD_OTHER); 4206 goto fail; 4207 } 4208 4209 /* Create the necessary L2 entries */ 4210 host_offset = allocation_start; 4211 guest_offset = old_length; 4212 while (nb_new_data_clusters) { 4213 int64_t nb_clusters = MIN( 4214 nb_new_data_clusters, 4215 s->l2_slice_size - offset_to_l2_slice_index(s, guest_offset)); 4216 QCowL2Meta allocation = { 4217 .offset = guest_offset, 4218 .alloc_offset = host_offset, 4219 .nb_clusters = nb_clusters, 4220 }; 4221 qemu_co_queue_init(&allocation.dependent_requests); 4222 4223 ret = qcow2_alloc_cluster_link_l2(bs, &allocation); 4224 if (ret < 0) { 4225 error_setg_errno(errp, -ret, "Failed to update L2 tables"); 4226 qcow2_free_clusters(bs, host_offset, 4227 nb_new_data_clusters * s->cluster_size, 4228 QCOW2_DISCARD_OTHER); 4229 goto fail; 4230 } 4231 4232 guest_offset += nb_clusters * s->cluster_size; 4233 host_offset += nb_clusters * s->cluster_size; 4234 nb_new_data_clusters -= nb_clusters; 4235 } 4236 break; 4237 } 4238 4239 default: 4240 g_assert_not_reached(); 4241 } 4242 4243 if ((flags & BDRV_REQ_ZERO_WRITE) && offset > old_length) { 4244 uint64_t zero_start = QEMU_ALIGN_UP(old_length, s->cluster_size); 4245 4246 /* 4247 * Use zero clusters as much as we can. qcow2_cluster_zeroize() 4248 * requires a cluster-aligned start. The end may be unaligned if it is 4249 * at the end of the image (which it is here). 4250 */ 4251 if (offset > zero_start) { 4252 ret = qcow2_cluster_zeroize(bs, zero_start, offset - zero_start, 0); 4253 if (ret < 0) { 4254 error_setg_errno(errp, -ret, "Failed to zero out new clusters"); 4255 goto fail; 4256 } 4257 } 4258 4259 /* Write explicit zeros for the unaligned head */ 4260 if (zero_start > old_length) { 4261 uint64_t len = MIN(zero_start, offset) - old_length; 4262 uint8_t *buf = qemu_blockalign0(bs, len); 4263 QEMUIOVector qiov; 4264 qemu_iovec_init_buf(&qiov, buf, len); 4265 4266 qemu_co_mutex_unlock(&s->lock); 4267 ret = qcow2_co_pwritev_part(bs, old_length, len, &qiov, 0, 0); 4268 qemu_co_mutex_lock(&s->lock); 4269 4270 qemu_vfree(buf); 4271 if (ret < 0) { 4272 error_setg_errno(errp, -ret, "Failed to zero out the new area"); 4273 goto fail; 4274 } 4275 } 4276 } 4277 4278 if (prealloc != PREALLOC_MODE_OFF) { 4279 /* Flush metadata before actually changing the image size */ 4280 ret = qcow2_write_caches(bs); 4281 if (ret < 0) { 4282 error_setg_errno(errp, -ret, 4283 "Failed to flush the preallocated area to disk"); 4284 goto fail; 4285 } 4286 } 4287 4288 bs->total_sectors = offset / BDRV_SECTOR_SIZE; 4289 4290 /* write updated header.size */ 4291 offset = cpu_to_be64(offset); 4292 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size), 4293 &offset, sizeof(uint64_t)); 4294 if (ret < 0) { 4295 error_setg_errno(errp, -ret, "Failed to update the image size"); 4296 goto fail; 4297 } 4298 4299 s->l1_vm_state_index = new_l1_size; 4300 4301 /* Update cache sizes */ 4302 options = qdict_clone_shallow(bs->options); 4303 ret = qcow2_update_options(bs, options, s->flags, errp); 4304 qobject_unref(options); 4305 if (ret < 0) { 4306 goto fail; 4307 } 4308 ret = 0; 4309 fail: 4310 qemu_co_mutex_unlock(&s->lock); 4311 return ret; 4312 } 4313 4314 static coroutine_fn int 4315 qcow2_co_pwritev_compressed_task(BlockDriverState *bs, 4316 uint64_t offset, uint64_t bytes, 4317 QEMUIOVector *qiov, size_t qiov_offset) 4318 { 4319 BDRVQcow2State *s = bs->opaque; 4320 int ret; 4321 ssize_t out_len; 4322 uint8_t *buf, *out_buf; 4323 uint64_t cluster_offset; 4324 4325 assert(bytes == s->cluster_size || (bytes < s->cluster_size && 4326 (offset + bytes == bs->total_sectors << BDRV_SECTOR_BITS))); 4327 4328 buf = qemu_blockalign(bs, s->cluster_size); 4329 if (bytes < s->cluster_size) { 4330 /* Zero-pad last write if image size is not cluster aligned */ 4331 memset(buf + bytes, 0, s->cluster_size - bytes); 4332 } 4333 qemu_iovec_to_buf(qiov, qiov_offset, buf, bytes); 4334 4335 out_buf = g_malloc(s->cluster_size); 4336 4337 out_len = qcow2_co_compress(bs, out_buf, s->cluster_size - 1, 4338 buf, s->cluster_size); 4339 if (out_len == -ENOMEM) { 4340 /* could not compress: write normal cluster */ 4341 ret = qcow2_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset, 0); 4342 if (ret < 0) { 4343 goto fail; 4344 } 4345 goto success; 4346 } else if (out_len < 0) { 4347 ret = -EINVAL; 4348 goto fail; 4349 } 4350 4351 qemu_co_mutex_lock(&s->lock); 4352 ret = qcow2_alloc_compressed_cluster_offset(bs, offset, out_len, 4353 &cluster_offset); 4354 if (ret < 0) { 4355 qemu_co_mutex_unlock(&s->lock); 4356 goto fail; 4357 } 4358 4359 ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset, out_len, true); 4360 qemu_co_mutex_unlock(&s->lock); 4361 if (ret < 0) { 4362 goto fail; 4363 } 4364 4365 BLKDBG_EVENT(s->data_file, BLKDBG_WRITE_COMPRESSED); 4366 ret = bdrv_co_pwrite(s->data_file, cluster_offset, out_len, out_buf, 0); 4367 if (ret < 0) { 4368 goto fail; 4369 } 4370 success: 4371 ret = 0; 4372 fail: 4373 qemu_vfree(buf); 4374 g_free(out_buf); 4375 return ret; 4376 } 4377 4378 static coroutine_fn int qcow2_co_pwritev_compressed_task_entry(AioTask *task) 4379 { 4380 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task); 4381 4382 assert(!t->cluster_type && !t->l2meta); 4383 4384 return qcow2_co_pwritev_compressed_task(t->bs, t->offset, t->bytes, t->qiov, 4385 t->qiov_offset); 4386 } 4387 4388 /* 4389 * XXX: put compressed sectors first, then all the cluster aligned 4390 * tables to avoid losing bytes in alignment 4391 */ 4392 static coroutine_fn int 4393 qcow2_co_pwritev_compressed_part(BlockDriverState *bs, 4394 uint64_t offset, uint64_t bytes, 4395 QEMUIOVector *qiov, size_t qiov_offset) 4396 { 4397 BDRVQcow2State *s = bs->opaque; 4398 AioTaskPool *aio = NULL; 4399 int ret = 0; 4400 4401 if (has_data_file(bs)) { 4402 return -ENOTSUP; 4403 } 4404 4405 if (bytes == 0) { 4406 /* 4407 * align end of file to a sector boundary to ease reading with 4408 * sector based I/Os 4409 */ 4410 int64_t len = bdrv_getlength(bs->file->bs); 4411 if (len < 0) { 4412 return len; 4413 } 4414 return bdrv_co_truncate(bs->file, len, false, PREALLOC_MODE_OFF, 0, 4415 NULL); 4416 } 4417 4418 if (offset_into_cluster(s, offset)) { 4419 return -EINVAL; 4420 } 4421 4422 if (offset_into_cluster(s, bytes) && 4423 (offset + bytes) != (bs->total_sectors << BDRV_SECTOR_BITS)) { 4424 return -EINVAL; 4425 } 4426 4427 while (bytes && aio_task_pool_status(aio) == 0) { 4428 uint64_t chunk_size = MIN(bytes, s->cluster_size); 4429 4430 if (!aio && chunk_size != bytes) { 4431 aio = aio_task_pool_new(QCOW2_MAX_WORKERS); 4432 } 4433 4434 ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_compressed_task_entry, 4435 0, 0, offset, chunk_size, qiov, qiov_offset, NULL); 4436 if (ret < 0) { 4437 break; 4438 } 4439 qiov_offset += chunk_size; 4440 offset += chunk_size; 4441 bytes -= chunk_size; 4442 } 4443 4444 if (aio) { 4445 aio_task_pool_wait_all(aio); 4446 if (ret == 0) { 4447 ret = aio_task_pool_status(aio); 4448 } 4449 g_free(aio); 4450 } 4451 4452 return ret; 4453 } 4454 4455 static int coroutine_fn 4456 qcow2_co_preadv_compressed(BlockDriverState *bs, 4457 uint64_t file_cluster_offset, 4458 uint64_t offset, 4459 uint64_t bytes, 4460 QEMUIOVector *qiov, 4461 size_t qiov_offset) 4462 { 4463 BDRVQcow2State *s = bs->opaque; 4464 int ret = 0, csize, nb_csectors; 4465 uint64_t coffset; 4466 uint8_t *buf, *out_buf; 4467 int offset_in_cluster = offset_into_cluster(s, offset); 4468 4469 coffset = file_cluster_offset & s->cluster_offset_mask; 4470 nb_csectors = ((file_cluster_offset >> s->csize_shift) & s->csize_mask) + 1; 4471 csize = nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE - 4472 (coffset & ~QCOW2_COMPRESSED_SECTOR_MASK); 4473 4474 buf = g_try_malloc(csize); 4475 if (!buf) { 4476 return -ENOMEM; 4477 } 4478 4479 out_buf = qemu_blockalign(bs, s->cluster_size); 4480 4481 BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED); 4482 ret = bdrv_co_pread(bs->file, coffset, csize, buf, 0); 4483 if (ret < 0) { 4484 goto fail; 4485 } 4486 4487 if (qcow2_co_decompress(bs, out_buf, s->cluster_size, buf, csize) < 0) { 4488 ret = -EIO; 4489 goto fail; 4490 } 4491 4492 qemu_iovec_from_buf(qiov, qiov_offset, out_buf + offset_in_cluster, bytes); 4493 4494 fail: 4495 qemu_vfree(out_buf); 4496 g_free(buf); 4497 4498 return ret; 4499 } 4500 4501 static int make_completely_empty(BlockDriverState *bs) 4502 { 4503 BDRVQcow2State *s = bs->opaque; 4504 Error *local_err = NULL; 4505 int ret, l1_clusters; 4506 int64_t offset; 4507 uint64_t *new_reftable = NULL; 4508 uint64_t rt_entry, l1_size2; 4509 struct { 4510 uint64_t l1_offset; 4511 uint64_t reftable_offset; 4512 uint32_t reftable_clusters; 4513 } QEMU_PACKED l1_ofs_rt_ofs_cls; 4514 4515 ret = qcow2_cache_empty(bs, s->l2_table_cache); 4516 if (ret < 0) { 4517 goto fail; 4518 } 4519 4520 ret = qcow2_cache_empty(bs, s->refcount_block_cache); 4521 if (ret < 0) { 4522 goto fail; 4523 } 4524 4525 /* Refcounts will be broken utterly */ 4526 ret = qcow2_mark_dirty(bs); 4527 if (ret < 0) { 4528 goto fail; 4529 } 4530 4531 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 4532 4533 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t)); 4534 l1_size2 = (uint64_t)s->l1_size * sizeof(uint64_t); 4535 4536 /* After this call, neither the in-memory nor the on-disk refcount 4537 * information accurately describe the actual references */ 4538 4539 ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset, 4540 l1_clusters * s->cluster_size, 0); 4541 if (ret < 0) { 4542 goto fail_broken_refcounts; 4543 } 4544 memset(s->l1_table, 0, l1_size2); 4545 4546 BLKDBG_EVENT(bs->file, BLKDBG_EMPTY_IMAGE_PREPARE); 4547 4548 /* Overwrite enough clusters at the beginning of the sectors to place 4549 * the refcount table, a refcount block and the L1 table in; this may 4550 * overwrite parts of the existing refcount and L1 table, which is not 4551 * an issue because the dirty flag is set, complete data loss is in fact 4552 * desired and partial data loss is consequently fine as well */ 4553 ret = bdrv_pwrite_zeroes(bs->file, s->cluster_size, 4554 (2 + l1_clusters) * s->cluster_size, 0); 4555 /* This call (even if it failed overall) may have overwritten on-disk 4556 * refcount structures; in that case, the in-memory refcount information 4557 * will probably differ from the on-disk information which makes the BDS 4558 * unusable */ 4559 if (ret < 0) { 4560 goto fail_broken_refcounts; 4561 } 4562 4563 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 4564 BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE); 4565 4566 /* "Create" an empty reftable (one cluster) directly after the image 4567 * header and an empty L1 table three clusters after the image header; 4568 * the cluster between those two will be used as the first refblock */ 4569 l1_ofs_rt_ofs_cls.l1_offset = cpu_to_be64(3 * s->cluster_size); 4570 l1_ofs_rt_ofs_cls.reftable_offset = cpu_to_be64(s->cluster_size); 4571 l1_ofs_rt_ofs_cls.reftable_clusters = cpu_to_be32(1); 4572 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_table_offset), 4573 &l1_ofs_rt_ofs_cls, sizeof(l1_ofs_rt_ofs_cls)); 4574 if (ret < 0) { 4575 goto fail_broken_refcounts; 4576 } 4577 4578 s->l1_table_offset = 3 * s->cluster_size; 4579 4580 new_reftable = g_try_new0(uint64_t, s->cluster_size / sizeof(uint64_t)); 4581 if (!new_reftable) { 4582 ret = -ENOMEM; 4583 goto fail_broken_refcounts; 4584 } 4585 4586 s->refcount_table_offset = s->cluster_size; 4587 s->refcount_table_size = s->cluster_size / sizeof(uint64_t); 4588 s->max_refcount_table_index = 0; 4589 4590 g_free(s->refcount_table); 4591 s->refcount_table = new_reftable; 4592 new_reftable = NULL; 4593 4594 /* Now the in-memory refcount information again corresponds to the on-disk 4595 * information (reftable is empty and no refblocks (the refblock cache is 4596 * empty)); however, this means some clusters (e.g. the image header) are 4597 * referenced, but not refcounted, but the normal qcow2 code assumes that 4598 * the in-memory information is always correct */ 4599 4600 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC); 4601 4602 /* Enter the first refblock into the reftable */ 4603 rt_entry = cpu_to_be64(2 * s->cluster_size); 4604 ret = bdrv_pwrite_sync(bs->file, s->cluster_size, 4605 &rt_entry, sizeof(rt_entry)); 4606 if (ret < 0) { 4607 goto fail_broken_refcounts; 4608 } 4609 s->refcount_table[0] = 2 * s->cluster_size; 4610 4611 s->free_cluster_index = 0; 4612 assert(3 + l1_clusters <= s->refcount_block_size); 4613 offset = qcow2_alloc_clusters(bs, 3 * s->cluster_size + l1_size2); 4614 if (offset < 0) { 4615 ret = offset; 4616 goto fail_broken_refcounts; 4617 } else if (offset > 0) { 4618 error_report("First cluster in emptied image is in use"); 4619 abort(); 4620 } 4621 4622 /* Now finally the in-memory information corresponds to the on-disk 4623 * structures and is correct */ 4624 ret = qcow2_mark_clean(bs); 4625 if (ret < 0) { 4626 goto fail; 4627 } 4628 4629 ret = bdrv_truncate(bs->file, (3 + l1_clusters) * s->cluster_size, false, 4630 PREALLOC_MODE_OFF, 0, &local_err); 4631 if (ret < 0) { 4632 error_report_err(local_err); 4633 goto fail; 4634 } 4635 4636 return 0; 4637 4638 fail_broken_refcounts: 4639 /* The BDS is unusable at this point. If we wanted to make it usable, we 4640 * would have to call qcow2_refcount_close(), qcow2_refcount_init(), 4641 * qcow2_check_refcounts(), qcow2_refcount_close() and qcow2_refcount_init() 4642 * again. However, because the functions which could have caused this error 4643 * path to be taken are used by those functions as well, it's very likely 4644 * that that sequence will fail as well. Therefore, just eject the BDS. */ 4645 bs->drv = NULL; 4646 4647 fail: 4648 g_free(new_reftable); 4649 return ret; 4650 } 4651 4652 static int qcow2_make_empty(BlockDriverState *bs) 4653 { 4654 BDRVQcow2State *s = bs->opaque; 4655 uint64_t offset, end_offset; 4656 int step = QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size); 4657 int l1_clusters, ret = 0; 4658 4659 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / sizeof(uint64_t)); 4660 4661 if (s->qcow_version >= 3 && !s->snapshots && !s->nb_bitmaps && 4662 3 + l1_clusters <= s->refcount_block_size && 4663 s->crypt_method_header != QCOW_CRYPT_LUKS && 4664 !has_data_file(bs)) { 4665 /* The following function only works for qcow2 v3 images (it 4666 * requires the dirty flag) and only as long as there are no 4667 * features that reserve extra clusters (such as snapshots, 4668 * LUKS header, or persistent bitmaps), because it completely 4669 * empties the image. Furthermore, the L1 table and three 4670 * additional clusters (image header, refcount table, one 4671 * refcount block) have to fit inside one refcount block. It 4672 * only resets the image file, i.e. does not work with an 4673 * external data file. */ 4674 return make_completely_empty(bs); 4675 } 4676 4677 /* This fallback code simply discards every active cluster; this is slow, 4678 * but works in all cases */ 4679 end_offset = bs->total_sectors * BDRV_SECTOR_SIZE; 4680 for (offset = 0; offset < end_offset; offset += step) { 4681 /* As this function is generally used after committing an external 4682 * snapshot, QCOW2_DISCARD_SNAPSHOT seems appropriate. Also, the 4683 * default action for this kind of discard is to pass the discard, 4684 * which will ideally result in an actually smaller image file, as 4685 * is probably desired. */ 4686 ret = qcow2_cluster_discard(bs, offset, MIN(step, end_offset - offset), 4687 QCOW2_DISCARD_SNAPSHOT, true); 4688 if (ret < 0) { 4689 break; 4690 } 4691 } 4692 4693 return ret; 4694 } 4695 4696 static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs) 4697 { 4698 BDRVQcow2State *s = bs->opaque; 4699 int ret; 4700 4701 qemu_co_mutex_lock(&s->lock); 4702 ret = qcow2_write_caches(bs); 4703 qemu_co_mutex_unlock(&s->lock); 4704 4705 return ret; 4706 } 4707 4708 static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs, 4709 Error **errp) 4710 { 4711 Error *local_err = NULL; 4712 BlockMeasureInfo *info; 4713 uint64_t required = 0; /* bytes that contribute to required size */ 4714 uint64_t virtual_size; /* disk size as seen by guest */ 4715 uint64_t refcount_bits; 4716 uint64_t l2_tables; 4717 uint64_t luks_payload_size = 0; 4718 size_t cluster_size; 4719 int version; 4720 char *optstr; 4721 PreallocMode prealloc; 4722 bool has_backing_file; 4723 bool has_luks; 4724 4725 /* Parse image creation options */ 4726 cluster_size = qcow2_opt_get_cluster_size_del(opts, &local_err); 4727 if (local_err) { 4728 goto err; 4729 } 4730 4731 version = qcow2_opt_get_version_del(opts, &local_err); 4732 if (local_err) { 4733 goto err; 4734 } 4735 4736 refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err); 4737 if (local_err) { 4738 goto err; 4739 } 4740 4741 optstr = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); 4742 prealloc = qapi_enum_parse(&PreallocMode_lookup, optstr, 4743 PREALLOC_MODE_OFF, &local_err); 4744 g_free(optstr); 4745 if (local_err) { 4746 goto err; 4747 } 4748 4749 optstr = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); 4750 has_backing_file = !!optstr; 4751 g_free(optstr); 4752 4753 optstr = qemu_opt_get_del(opts, BLOCK_OPT_ENCRYPT_FORMAT); 4754 has_luks = optstr && strcmp(optstr, "luks") == 0; 4755 g_free(optstr); 4756 4757 if (has_luks) { 4758 g_autoptr(QCryptoBlockCreateOptions) create_opts = NULL; 4759 QDict *opts_qdict; 4760 QDict *cryptoopts; 4761 size_t headerlen; 4762 4763 opts_qdict = qemu_opts_to_qdict(opts, NULL); 4764 qdict_extract_subqdict(opts_qdict, &cryptoopts, "encrypt."); 4765 qobject_unref(opts_qdict); 4766 4767 qdict_put_str(cryptoopts, "format", "luks"); 4768 4769 create_opts = block_crypto_create_opts_init(cryptoopts, errp); 4770 qobject_unref(cryptoopts); 4771 if (!create_opts) { 4772 goto err; 4773 } 4774 4775 if (!qcrypto_block_calculate_payload_offset(create_opts, 4776 "encrypt.", 4777 &headerlen, 4778 &local_err)) { 4779 goto err; 4780 } 4781 4782 luks_payload_size = ROUND_UP(headerlen, cluster_size); 4783 } 4784 4785 virtual_size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0); 4786 virtual_size = ROUND_UP(virtual_size, cluster_size); 4787 4788 /* Check that virtual disk size is valid */ 4789 l2_tables = DIV_ROUND_UP(virtual_size / cluster_size, 4790 cluster_size / sizeof(uint64_t)); 4791 if (l2_tables * sizeof(uint64_t) > QCOW_MAX_L1_SIZE) { 4792 error_setg(&local_err, "The image size is too large " 4793 "(try using a larger cluster size)"); 4794 goto err; 4795 } 4796 4797 /* Account for input image */ 4798 if (in_bs) { 4799 int64_t ssize = bdrv_getlength(in_bs); 4800 if (ssize < 0) { 4801 error_setg_errno(&local_err, -ssize, 4802 "Unable to get image virtual_size"); 4803 goto err; 4804 } 4805 4806 virtual_size = ROUND_UP(ssize, cluster_size); 4807 4808 if (has_backing_file) { 4809 /* We don't how much of the backing chain is shared by the input 4810 * image and the new image file. In the worst case the new image's 4811 * backing file has nothing in common with the input image. Be 4812 * conservative and assume all clusters need to be written. 4813 */ 4814 required = virtual_size; 4815 } else { 4816 int64_t offset; 4817 int64_t pnum = 0; 4818 4819 for (offset = 0; offset < ssize; offset += pnum) { 4820 int ret; 4821 4822 ret = bdrv_block_status_above(in_bs, NULL, offset, 4823 ssize - offset, &pnum, NULL, 4824 NULL); 4825 if (ret < 0) { 4826 error_setg_errno(&local_err, -ret, 4827 "Unable to get block status"); 4828 goto err; 4829 } 4830 4831 if (ret & BDRV_BLOCK_ZERO) { 4832 /* Skip zero regions (safe with no backing file) */ 4833 } else if ((ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) == 4834 (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) { 4835 /* Extend pnum to end of cluster for next iteration */ 4836 pnum = ROUND_UP(offset + pnum, cluster_size) - offset; 4837 4838 /* Count clusters we've seen */ 4839 required += offset % cluster_size + pnum; 4840 } 4841 } 4842 } 4843 } 4844 4845 /* Take into account preallocation. Nothing special is needed for 4846 * PREALLOC_MODE_METADATA since metadata is always counted. 4847 */ 4848 if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) { 4849 required = virtual_size; 4850 } 4851 4852 info = g_new(BlockMeasureInfo, 1); 4853 info->fully_allocated = 4854 qcow2_calc_prealloc_size(virtual_size, cluster_size, 4855 ctz32(refcount_bits)) + luks_payload_size; 4856 4857 /* Remove data clusters that are not required. This overestimates the 4858 * required size because metadata needed for the fully allocated file is 4859 * still counted. 4860 */ 4861 info->required = info->fully_allocated - virtual_size + required; 4862 return info; 4863 4864 err: 4865 error_propagate(errp, local_err); 4866 return NULL; 4867 } 4868 4869 static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) 4870 { 4871 BDRVQcow2State *s = bs->opaque; 4872 bdi->unallocated_blocks_are_zero = true; 4873 bdi->cluster_size = s->cluster_size; 4874 bdi->vm_state_offset = qcow2_vm_state_offset(s); 4875 return 0; 4876 } 4877 4878 static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs, 4879 Error **errp) 4880 { 4881 BDRVQcow2State *s = bs->opaque; 4882 ImageInfoSpecific *spec_info; 4883 QCryptoBlockInfo *encrypt_info = NULL; 4884 Error *local_err = NULL; 4885 4886 if (s->crypto != NULL) { 4887 encrypt_info = qcrypto_block_get_info(s->crypto, &local_err); 4888 if (local_err) { 4889 error_propagate(errp, local_err); 4890 return NULL; 4891 } 4892 } 4893 4894 spec_info = g_new(ImageInfoSpecific, 1); 4895 *spec_info = (ImageInfoSpecific){ 4896 .type = IMAGE_INFO_SPECIFIC_KIND_QCOW2, 4897 .u.qcow2.data = g_new0(ImageInfoSpecificQCow2, 1), 4898 }; 4899 if (s->qcow_version == 2) { 4900 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 4901 .compat = g_strdup("0.10"), 4902 .refcount_bits = s->refcount_bits, 4903 }; 4904 } else if (s->qcow_version == 3) { 4905 Qcow2BitmapInfoList *bitmaps; 4906 bitmaps = qcow2_get_bitmap_info_list(bs, &local_err); 4907 if (local_err) { 4908 error_propagate(errp, local_err); 4909 qapi_free_ImageInfoSpecific(spec_info); 4910 qapi_free_QCryptoBlockInfo(encrypt_info); 4911 return NULL; 4912 } 4913 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){ 4914 .compat = g_strdup("1.1"), 4915 .lazy_refcounts = s->compatible_features & 4916 QCOW2_COMPAT_LAZY_REFCOUNTS, 4917 .has_lazy_refcounts = true, 4918 .corrupt = s->incompatible_features & 4919 QCOW2_INCOMPAT_CORRUPT, 4920 .has_corrupt = true, 4921 .refcount_bits = s->refcount_bits, 4922 .has_bitmaps = !!bitmaps, 4923 .bitmaps = bitmaps, 4924 .has_data_file = !!s->image_data_file, 4925 .data_file = g_strdup(s->image_data_file), 4926 .has_data_file_raw = has_data_file(bs), 4927 .data_file_raw = data_file_is_raw(bs), 4928 }; 4929 } else { 4930 /* if this assertion fails, this probably means a new version was 4931 * added without having it covered here */ 4932 assert(false); 4933 } 4934 4935 if (encrypt_info) { 4936 ImageInfoSpecificQCow2Encryption *qencrypt = 4937 g_new(ImageInfoSpecificQCow2Encryption, 1); 4938 switch (encrypt_info->format) { 4939 case Q_CRYPTO_BLOCK_FORMAT_QCOW: 4940 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_AES; 4941 break; 4942 case Q_CRYPTO_BLOCK_FORMAT_LUKS: 4943 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_LUKS; 4944 qencrypt->u.luks = encrypt_info->u.luks; 4945 break; 4946 default: 4947 abort(); 4948 } 4949 /* Since we did shallow copy above, erase any pointers 4950 * in the original info */ 4951 memset(&encrypt_info->u, 0, sizeof(encrypt_info->u)); 4952 qapi_free_QCryptoBlockInfo(encrypt_info); 4953 4954 spec_info->u.qcow2.data->has_encrypt = true; 4955 spec_info->u.qcow2.data->encrypt = qencrypt; 4956 } 4957 4958 return spec_info; 4959 } 4960 4961 static int qcow2_has_zero_init(BlockDriverState *bs) 4962 { 4963 BDRVQcow2State *s = bs->opaque; 4964 bool preallocated; 4965 4966 if (qemu_in_coroutine()) { 4967 qemu_co_mutex_lock(&s->lock); 4968 } 4969 /* 4970 * Check preallocation status: Preallocated images have all L2 4971 * tables allocated, nonpreallocated images have none. It is 4972 * therefore enough to check the first one. 4973 */ 4974 preallocated = s->l1_size > 0 && s->l1_table[0] != 0; 4975 if (qemu_in_coroutine()) { 4976 qemu_co_mutex_unlock(&s->lock); 4977 } 4978 4979 if (!preallocated) { 4980 return 1; 4981 } else if (bs->encrypted) { 4982 return 0; 4983 } else { 4984 return bdrv_has_zero_init(s->data_file->bs); 4985 } 4986 } 4987 4988 static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, 4989 int64_t pos) 4990 { 4991 BDRVQcow2State *s = bs->opaque; 4992 4993 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE); 4994 return bs->drv->bdrv_co_pwritev_part(bs, qcow2_vm_state_offset(s) + pos, 4995 qiov->size, qiov, 0, 0); 4996 } 4997 4998 static int qcow2_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, 4999 int64_t pos) 5000 { 5001 BDRVQcow2State *s = bs->opaque; 5002 5003 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD); 5004 return bs->drv->bdrv_co_preadv_part(bs, qcow2_vm_state_offset(s) + pos, 5005 qiov->size, qiov, 0, 0); 5006 } 5007 5008 /* 5009 * Downgrades an image's version. To achieve this, any incompatible features 5010 * have to be removed. 5011 */ 5012 static int qcow2_downgrade(BlockDriverState *bs, int target_version, 5013 BlockDriverAmendStatusCB *status_cb, void *cb_opaque, 5014 Error **errp) 5015 { 5016 BDRVQcow2State *s = bs->opaque; 5017 int current_version = s->qcow_version; 5018 int ret; 5019 int i; 5020 5021 /* This is qcow2_downgrade(), not qcow2_upgrade() */ 5022 assert(target_version < current_version); 5023 5024 /* There are no other versions (now) that you can downgrade to */ 5025 assert(target_version == 2); 5026 5027 if (s->refcount_order != 4) { 5028 error_setg(errp, "compat=0.10 requires refcount_bits=16"); 5029 return -ENOTSUP; 5030 } 5031 5032 if (has_data_file(bs)) { 5033 error_setg(errp, "Cannot downgrade an image with a data file"); 5034 return -ENOTSUP; 5035 } 5036 5037 /* 5038 * If any internal snapshot has a different size than the current 5039 * image size, or VM state size that exceeds 32 bits, downgrading 5040 * is unsafe. Even though we would still use v3-compliant output 5041 * to preserve that data, other v2 programs might not realize 5042 * those optional fields are important. 5043 */ 5044 for (i = 0; i < s->nb_snapshots; i++) { 5045 if (s->snapshots[i].vm_state_size > UINT32_MAX || 5046 s->snapshots[i].disk_size != bs->total_sectors * BDRV_SECTOR_SIZE) { 5047 error_setg(errp, "Internal snapshots prevent downgrade of image"); 5048 return -ENOTSUP; 5049 } 5050 } 5051 5052 /* clear incompatible features */ 5053 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { 5054 ret = qcow2_mark_clean(bs); 5055 if (ret < 0) { 5056 error_setg_errno(errp, -ret, "Failed to make the image clean"); 5057 return ret; 5058 } 5059 } 5060 5061 /* with QCOW2_INCOMPAT_CORRUPT, it is pretty much impossible to get here in 5062 * the first place; if that happens nonetheless, returning -ENOTSUP is the 5063 * best thing to do anyway */ 5064 5065 if (s->incompatible_features) { 5066 error_setg(errp, "Cannot downgrade an image with incompatible features " 5067 "%#" PRIx64 " set", s->incompatible_features); 5068 return -ENOTSUP; 5069 } 5070 5071 /* since we can ignore compatible features, we can set them to 0 as well */ 5072 s->compatible_features = 0; 5073 /* if lazy refcounts have been used, they have already been fixed through 5074 * clearing the dirty flag */ 5075 5076 /* clearing autoclear features is trivial */ 5077 s->autoclear_features = 0; 5078 5079 ret = qcow2_expand_zero_clusters(bs, status_cb, cb_opaque); 5080 if (ret < 0) { 5081 error_setg_errno(errp, -ret, "Failed to turn zero into data clusters"); 5082 return ret; 5083 } 5084 5085 s->qcow_version = target_version; 5086 ret = qcow2_update_header(bs); 5087 if (ret < 0) { 5088 s->qcow_version = current_version; 5089 error_setg_errno(errp, -ret, "Failed to update the image header"); 5090 return ret; 5091 } 5092 return 0; 5093 } 5094 5095 /* 5096 * Upgrades an image's version. While newer versions encompass all 5097 * features of older versions, some things may have to be presented 5098 * differently. 5099 */ 5100 static int qcow2_upgrade(BlockDriverState *bs, int target_version, 5101 BlockDriverAmendStatusCB *status_cb, void *cb_opaque, 5102 Error **errp) 5103 { 5104 BDRVQcow2State *s = bs->opaque; 5105 bool need_snapshot_update; 5106 int current_version = s->qcow_version; 5107 int i; 5108 int ret; 5109 5110 /* This is qcow2_upgrade(), not qcow2_downgrade() */ 5111 assert(target_version > current_version); 5112 5113 /* There are no other versions (yet) that you can upgrade to */ 5114 assert(target_version == 3); 5115 5116 status_cb(bs, 0, 2, cb_opaque); 5117 5118 /* 5119 * In v2, snapshots do not need to have extra data. v3 requires 5120 * the 64-bit VM state size and the virtual disk size to be 5121 * present. 5122 * qcow2_write_snapshots() will always write the list in the 5123 * v3-compliant format. 5124 */ 5125 need_snapshot_update = false; 5126 for (i = 0; i < s->nb_snapshots; i++) { 5127 if (s->snapshots[i].extra_data_size < 5128 sizeof_field(QCowSnapshotExtraData, vm_state_size_large) + 5129 sizeof_field(QCowSnapshotExtraData, disk_size)) 5130 { 5131 need_snapshot_update = true; 5132 break; 5133 } 5134 } 5135 if (need_snapshot_update) { 5136 ret = qcow2_write_snapshots(bs); 5137 if (ret < 0) { 5138 error_setg_errno(errp, -ret, "Failed to update the snapshot table"); 5139 return ret; 5140 } 5141 } 5142 status_cb(bs, 1, 2, cb_opaque); 5143 5144 s->qcow_version = target_version; 5145 ret = qcow2_update_header(bs); 5146 if (ret < 0) { 5147 s->qcow_version = current_version; 5148 error_setg_errno(errp, -ret, "Failed to update the image header"); 5149 return ret; 5150 } 5151 status_cb(bs, 2, 2, cb_opaque); 5152 5153 return 0; 5154 } 5155 5156 typedef enum Qcow2AmendOperation { 5157 /* This is the value Qcow2AmendHelperCBInfo::last_operation will be 5158 * statically initialized to so that the helper CB can discern the first 5159 * invocation from an operation change */ 5160 QCOW2_NO_OPERATION = 0, 5161 5162 QCOW2_UPGRADING, 5163 QCOW2_CHANGING_REFCOUNT_ORDER, 5164 QCOW2_DOWNGRADING, 5165 } Qcow2AmendOperation; 5166 5167 typedef struct Qcow2AmendHelperCBInfo { 5168 /* The code coordinating the amend operations should only modify 5169 * these four fields; the rest will be managed by the CB */ 5170 BlockDriverAmendStatusCB *original_status_cb; 5171 void *original_cb_opaque; 5172 5173 Qcow2AmendOperation current_operation; 5174 5175 /* Total number of operations to perform (only set once) */ 5176 int total_operations; 5177 5178 /* The following fields are managed by the CB */ 5179 5180 /* Number of operations completed */ 5181 int operations_completed; 5182 5183 /* Cumulative offset of all completed operations */ 5184 int64_t offset_completed; 5185 5186 Qcow2AmendOperation last_operation; 5187 int64_t last_work_size; 5188 } Qcow2AmendHelperCBInfo; 5189 5190 static void qcow2_amend_helper_cb(BlockDriverState *bs, 5191 int64_t operation_offset, 5192 int64_t operation_work_size, void *opaque) 5193 { 5194 Qcow2AmendHelperCBInfo *info = opaque; 5195 int64_t current_work_size; 5196 int64_t projected_work_size; 5197 5198 if (info->current_operation != info->last_operation) { 5199 if (info->last_operation != QCOW2_NO_OPERATION) { 5200 info->offset_completed += info->last_work_size; 5201 info->operations_completed++; 5202 } 5203 5204 info->last_operation = info->current_operation; 5205 } 5206 5207 assert(info->total_operations > 0); 5208 assert(info->operations_completed < info->total_operations); 5209 5210 info->last_work_size = operation_work_size; 5211 5212 current_work_size = info->offset_completed + operation_work_size; 5213 5214 /* current_work_size is the total work size for (operations_completed + 1) 5215 * operations (which includes this one), so multiply it by the number of 5216 * operations not covered and divide it by the number of operations 5217 * covered to get a projection for the operations not covered */ 5218 projected_work_size = current_work_size * (info->total_operations - 5219 info->operations_completed - 1) 5220 / (info->operations_completed + 1); 5221 5222 info->original_status_cb(bs, info->offset_completed + operation_offset, 5223 current_work_size + projected_work_size, 5224 info->original_cb_opaque); 5225 } 5226 5227 static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts, 5228 BlockDriverAmendStatusCB *status_cb, 5229 void *cb_opaque, 5230 Error **errp) 5231 { 5232 BDRVQcow2State *s = bs->opaque; 5233 int old_version = s->qcow_version, new_version = old_version; 5234 uint64_t new_size = 0; 5235 const char *backing_file = NULL, *backing_format = NULL, *data_file = NULL; 5236 bool lazy_refcounts = s->use_lazy_refcounts; 5237 bool data_file_raw = data_file_is_raw(bs); 5238 const char *compat = NULL; 5239 uint64_t cluster_size = s->cluster_size; 5240 bool encrypt; 5241 int encformat; 5242 int refcount_bits = s->refcount_bits; 5243 int ret; 5244 QemuOptDesc *desc = opts->list->desc; 5245 Qcow2AmendHelperCBInfo helper_cb_info; 5246 5247 while (desc && desc->name) { 5248 if (!qemu_opt_find(opts, desc->name)) { 5249 /* only change explicitly defined options */ 5250 desc++; 5251 continue; 5252 } 5253 5254 if (!strcmp(desc->name, BLOCK_OPT_COMPAT_LEVEL)) { 5255 compat = qemu_opt_get(opts, BLOCK_OPT_COMPAT_LEVEL); 5256 if (!compat) { 5257 /* preserve default */ 5258 } else if (!strcmp(compat, "0.10") || !strcmp(compat, "v2")) { 5259 new_version = 2; 5260 } else if (!strcmp(compat, "1.1") || !strcmp(compat, "v3")) { 5261 new_version = 3; 5262 } else { 5263 error_setg(errp, "Unknown compatibility level %s", compat); 5264 return -EINVAL; 5265 } 5266 } else if (!strcmp(desc->name, BLOCK_OPT_PREALLOC)) { 5267 error_setg(errp, "Cannot change preallocation mode"); 5268 return -ENOTSUP; 5269 } else if (!strcmp(desc->name, BLOCK_OPT_SIZE)) { 5270 new_size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0); 5271 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FILE)) { 5272 backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE); 5273 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FMT)) { 5274 backing_format = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT); 5275 } else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT)) { 5276 encrypt = qemu_opt_get_bool(opts, BLOCK_OPT_ENCRYPT, 5277 !!s->crypto); 5278 5279 if (encrypt != !!s->crypto) { 5280 error_setg(errp, 5281 "Changing the encryption flag is not supported"); 5282 return -ENOTSUP; 5283 } 5284 } else if (!strcmp(desc->name, BLOCK_OPT_ENCRYPT_FORMAT)) { 5285 encformat = qcow2_crypt_method_from_format( 5286 qemu_opt_get(opts, BLOCK_OPT_ENCRYPT_FORMAT)); 5287 5288 if (encformat != s->crypt_method_header) { 5289 error_setg(errp, 5290 "Changing the encryption format is not supported"); 5291 return -ENOTSUP; 5292 } 5293 } else if (g_str_has_prefix(desc->name, "encrypt.")) { 5294 error_setg(errp, 5295 "Changing the encryption parameters is not supported"); 5296 return -ENOTSUP; 5297 } else if (!strcmp(desc->name, BLOCK_OPT_CLUSTER_SIZE)) { 5298 cluster_size = qemu_opt_get_size(opts, BLOCK_OPT_CLUSTER_SIZE, 5299 cluster_size); 5300 if (cluster_size != s->cluster_size) { 5301 error_setg(errp, "Changing the cluster size is not supported"); 5302 return -ENOTSUP; 5303 } 5304 } else if (!strcmp(desc->name, BLOCK_OPT_LAZY_REFCOUNTS)) { 5305 lazy_refcounts = qemu_opt_get_bool(opts, BLOCK_OPT_LAZY_REFCOUNTS, 5306 lazy_refcounts); 5307 } else if (!strcmp(desc->name, BLOCK_OPT_REFCOUNT_BITS)) { 5308 refcount_bits = qemu_opt_get_number(opts, BLOCK_OPT_REFCOUNT_BITS, 5309 refcount_bits); 5310 5311 if (refcount_bits <= 0 || refcount_bits > 64 || 5312 !is_power_of_2(refcount_bits)) 5313 { 5314 error_setg(errp, "Refcount width must be a power of two and " 5315 "may not exceed 64 bits"); 5316 return -EINVAL; 5317 } 5318 } else if (!strcmp(desc->name, BLOCK_OPT_DATA_FILE)) { 5319 data_file = qemu_opt_get(opts, BLOCK_OPT_DATA_FILE); 5320 if (data_file && !has_data_file(bs)) { 5321 error_setg(errp, "data-file can only be set for images that " 5322 "use an external data file"); 5323 return -EINVAL; 5324 } 5325 } else if (!strcmp(desc->name, BLOCK_OPT_DATA_FILE_RAW)) { 5326 data_file_raw = qemu_opt_get_bool(opts, BLOCK_OPT_DATA_FILE_RAW, 5327 data_file_raw); 5328 if (data_file_raw && !data_file_is_raw(bs)) { 5329 error_setg(errp, "data-file-raw cannot be set on existing " 5330 "images"); 5331 return -EINVAL; 5332 } 5333 } else { 5334 /* if this point is reached, this probably means a new option was 5335 * added without having it covered here */ 5336 abort(); 5337 } 5338 5339 desc++; 5340 } 5341 5342 helper_cb_info = (Qcow2AmendHelperCBInfo){ 5343 .original_status_cb = status_cb, 5344 .original_cb_opaque = cb_opaque, 5345 .total_operations = (new_version != old_version) 5346 + (s->refcount_bits != refcount_bits) 5347 }; 5348 5349 /* Upgrade first (some features may require compat=1.1) */ 5350 if (new_version > old_version) { 5351 helper_cb_info.current_operation = QCOW2_UPGRADING; 5352 ret = qcow2_upgrade(bs, new_version, &qcow2_amend_helper_cb, 5353 &helper_cb_info, errp); 5354 if (ret < 0) { 5355 return ret; 5356 } 5357 } 5358 5359 if (s->refcount_bits != refcount_bits) { 5360 int refcount_order = ctz32(refcount_bits); 5361 5362 if (new_version < 3 && refcount_bits != 16) { 5363 error_setg(errp, "Refcount widths other than 16 bits require " 5364 "compatibility level 1.1 or above (use compat=1.1 or " 5365 "greater)"); 5366 return -EINVAL; 5367 } 5368 5369 helper_cb_info.current_operation = QCOW2_CHANGING_REFCOUNT_ORDER; 5370 ret = qcow2_change_refcount_order(bs, refcount_order, 5371 &qcow2_amend_helper_cb, 5372 &helper_cb_info, errp); 5373 if (ret < 0) { 5374 return ret; 5375 } 5376 } 5377 5378 /* data-file-raw blocks backing files, so clear it first if requested */ 5379 if (data_file_raw) { 5380 s->autoclear_features |= QCOW2_AUTOCLEAR_DATA_FILE_RAW; 5381 } else { 5382 s->autoclear_features &= ~QCOW2_AUTOCLEAR_DATA_FILE_RAW; 5383 } 5384 5385 if (data_file) { 5386 g_free(s->image_data_file); 5387 s->image_data_file = *data_file ? g_strdup(data_file) : NULL; 5388 } 5389 5390 ret = qcow2_update_header(bs); 5391 if (ret < 0) { 5392 error_setg_errno(errp, -ret, "Failed to update the image header"); 5393 return ret; 5394 } 5395 5396 if (backing_file || backing_format) { 5397 ret = qcow2_change_backing_file(bs, 5398 backing_file ?: s->image_backing_file, 5399 backing_format ?: s->image_backing_format); 5400 if (ret < 0) { 5401 error_setg_errno(errp, -ret, "Failed to change the backing file"); 5402 return ret; 5403 } 5404 } 5405 5406 if (s->use_lazy_refcounts != lazy_refcounts) { 5407 if (lazy_refcounts) { 5408 if (new_version < 3) { 5409 error_setg(errp, "Lazy refcounts only supported with " 5410 "compatibility level 1.1 and above (use compat=1.1 " 5411 "or greater)"); 5412 return -EINVAL; 5413 } 5414 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS; 5415 ret = qcow2_update_header(bs); 5416 if (ret < 0) { 5417 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS; 5418 error_setg_errno(errp, -ret, "Failed to update the image header"); 5419 return ret; 5420 } 5421 s->use_lazy_refcounts = true; 5422 } else { 5423 /* make image clean first */ 5424 ret = qcow2_mark_clean(bs); 5425 if (ret < 0) { 5426 error_setg_errno(errp, -ret, "Failed to make the image clean"); 5427 return ret; 5428 } 5429 /* now disallow lazy refcounts */ 5430 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS; 5431 ret = qcow2_update_header(bs); 5432 if (ret < 0) { 5433 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS; 5434 error_setg_errno(errp, -ret, "Failed to update the image header"); 5435 return ret; 5436 } 5437 s->use_lazy_refcounts = false; 5438 } 5439 } 5440 5441 if (new_size) { 5442 BlockBackend *blk = blk_new_with_bs(bs, BLK_PERM_RESIZE, BLK_PERM_ALL, 5443 errp); 5444 if (!blk) { 5445 return -EPERM; 5446 } 5447 5448 /* 5449 * Amending image options should ensure that the image has 5450 * exactly the given new values, so pass exact=true here. 5451 */ 5452 ret = blk_truncate(blk, new_size, true, PREALLOC_MODE_OFF, 0, errp); 5453 blk_unref(blk); 5454 if (ret < 0) { 5455 return ret; 5456 } 5457 } 5458 5459 /* Downgrade last (so unsupported features can be removed before) */ 5460 if (new_version < old_version) { 5461 helper_cb_info.current_operation = QCOW2_DOWNGRADING; 5462 ret = qcow2_downgrade(bs, new_version, &qcow2_amend_helper_cb, 5463 &helper_cb_info, errp); 5464 if (ret < 0) { 5465 return ret; 5466 } 5467 } 5468 5469 return 0; 5470 } 5471 5472 /* 5473 * If offset or size are negative, respectively, they will not be included in 5474 * the BLOCK_IMAGE_CORRUPTED event emitted. 5475 * fatal will be ignored for read-only BDS; corruptions found there will always 5476 * be considered non-fatal. 5477 */ 5478 void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset, 5479 int64_t size, const char *message_format, ...) 5480 { 5481 BDRVQcow2State *s = bs->opaque; 5482 const char *node_name; 5483 char *message; 5484 va_list ap; 5485 5486 fatal = fatal && bdrv_is_writable(bs); 5487 5488 if (s->signaled_corruption && 5489 (!fatal || (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT))) 5490 { 5491 return; 5492 } 5493 5494 va_start(ap, message_format); 5495 message = g_strdup_vprintf(message_format, ap); 5496 va_end(ap); 5497 5498 if (fatal) { 5499 fprintf(stderr, "qcow2: Marking image as corrupt: %s; further " 5500 "corruption events will be suppressed\n", message); 5501 } else { 5502 fprintf(stderr, "qcow2: Image is corrupt: %s; further non-fatal " 5503 "corruption events will be suppressed\n", message); 5504 } 5505 5506 node_name = bdrv_get_node_name(bs); 5507 qapi_event_send_block_image_corrupted(bdrv_get_device_name(bs), 5508 *node_name != '\0', node_name, 5509 message, offset >= 0, offset, 5510 size >= 0, size, 5511 fatal); 5512 g_free(message); 5513 5514 if (fatal) { 5515 qcow2_mark_corrupt(bs); 5516 bs->drv = NULL; /* make BDS unusable */ 5517 } 5518 5519 s->signaled_corruption = true; 5520 } 5521 5522 static QemuOptsList qcow2_create_opts = { 5523 .name = "qcow2-create-opts", 5524 .head = QTAILQ_HEAD_INITIALIZER(qcow2_create_opts.head), 5525 .desc = { 5526 { 5527 .name = BLOCK_OPT_SIZE, 5528 .type = QEMU_OPT_SIZE, 5529 .help = "Virtual disk size" 5530 }, 5531 { 5532 .name = BLOCK_OPT_COMPAT_LEVEL, 5533 .type = QEMU_OPT_STRING, 5534 .help = "Compatibility level (v2 [0.10] or v3 [1.1])" 5535 }, 5536 { 5537 .name = BLOCK_OPT_BACKING_FILE, 5538 .type = QEMU_OPT_STRING, 5539 .help = "File name of a base image" 5540 }, 5541 { 5542 .name = BLOCK_OPT_BACKING_FMT, 5543 .type = QEMU_OPT_STRING, 5544 .help = "Image format of the base image" 5545 }, 5546 { 5547 .name = BLOCK_OPT_DATA_FILE, 5548 .type = QEMU_OPT_STRING, 5549 .help = "File name of an external data file" 5550 }, 5551 { 5552 .name = BLOCK_OPT_DATA_FILE_RAW, 5553 .type = QEMU_OPT_BOOL, 5554 .help = "The external data file must stay valid as a raw image" 5555 }, 5556 { 5557 .name = BLOCK_OPT_ENCRYPT, 5558 .type = QEMU_OPT_BOOL, 5559 .help = "Encrypt the image with format 'aes'. (Deprecated " 5560 "in favor of " BLOCK_OPT_ENCRYPT_FORMAT "=aes)", 5561 }, 5562 { 5563 .name = BLOCK_OPT_ENCRYPT_FORMAT, 5564 .type = QEMU_OPT_STRING, 5565 .help = "Encrypt the image, format choices: 'aes', 'luks'", 5566 }, 5567 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.", 5568 "ID of secret providing qcow AES key or LUKS passphrase"), 5569 BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_ALG("encrypt."), 5570 BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_MODE("encrypt."), 5571 BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_ALG("encrypt."), 5572 BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_HASH_ALG("encrypt."), 5573 BLOCK_CRYPTO_OPT_DEF_LUKS_HASH_ALG("encrypt."), 5574 BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."), 5575 { 5576 .name = BLOCK_OPT_CLUSTER_SIZE, 5577 .type = QEMU_OPT_SIZE, 5578 .help = "qcow2 cluster size", 5579 .def_value_str = stringify(DEFAULT_CLUSTER_SIZE) 5580 }, 5581 { 5582 .name = BLOCK_OPT_PREALLOC, 5583 .type = QEMU_OPT_STRING, 5584 .help = "Preallocation mode (allowed values: off, metadata, " 5585 "falloc, full)" 5586 }, 5587 { 5588 .name = BLOCK_OPT_LAZY_REFCOUNTS, 5589 .type = QEMU_OPT_BOOL, 5590 .help = "Postpone refcount updates", 5591 .def_value_str = "off" 5592 }, 5593 { 5594 .name = BLOCK_OPT_REFCOUNT_BITS, 5595 .type = QEMU_OPT_NUMBER, 5596 .help = "Width of a reference count entry in bits", 5597 .def_value_str = "16" 5598 }, 5599 { /* end of list */ } 5600 } 5601 }; 5602 5603 static const char *const qcow2_strong_runtime_opts[] = { 5604 "encrypt." BLOCK_CRYPTO_OPT_QCOW_KEY_SECRET, 5605 5606 NULL 5607 }; 5608 5609 BlockDriver bdrv_qcow2 = { 5610 .format_name = "qcow2", 5611 .instance_size = sizeof(BDRVQcow2State), 5612 .bdrv_probe = qcow2_probe, 5613 .bdrv_open = qcow2_open, 5614 .bdrv_close = qcow2_close, 5615 .bdrv_reopen_prepare = qcow2_reopen_prepare, 5616 .bdrv_reopen_commit = qcow2_reopen_commit, 5617 .bdrv_reopen_commit_post = qcow2_reopen_commit_post, 5618 .bdrv_reopen_abort = qcow2_reopen_abort, 5619 .bdrv_join_options = qcow2_join_options, 5620 .bdrv_child_perm = bdrv_format_default_perms, 5621 .bdrv_co_create_opts = qcow2_co_create_opts, 5622 .bdrv_co_create = qcow2_co_create, 5623 .bdrv_has_zero_init = qcow2_has_zero_init, 5624 .bdrv_co_block_status = qcow2_co_block_status, 5625 5626 .bdrv_co_preadv_part = qcow2_co_preadv_part, 5627 .bdrv_co_pwritev_part = qcow2_co_pwritev_part, 5628 .bdrv_co_flush_to_os = qcow2_co_flush_to_os, 5629 5630 .bdrv_co_pwrite_zeroes = qcow2_co_pwrite_zeroes, 5631 .bdrv_co_pdiscard = qcow2_co_pdiscard, 5632 .bdrv_co_copy_range_from = qcow2_co_copy_range_from, 5633 .bdrv_co_copy_range_to = qcow2_co_copy_range_to, 5634 .bdrv_co_truncate = qcow2_co_truncate, 5635 .bdrv_co_pwritev_compressed_part = qcow2_co_pwritev_compressed_part, 5636 .bdrv_make_empty = qcow2_make_empty, 5637 5638 .bdrv_snapshot_create = qcow2_snapshot_create, 5639 .bdrv_snapshot_goto = qcow2_snapshot_goto, 5640 .bdrv_snapshot_delete = qcow2_snapshot_delete, 5641 .bdrv_snapshot_list = qcow2_snapshot_list, 5642 .bdrv_snapshot_load_tmp = qcow2_snapshot_load_tmp, 5643 .bdrv_measure = qcow2_measure, 5644 .bdrv_get_info = qcow2_get_info, 5645 .bdrv_get_specific_info = qcow2_get_specific_info, 5646 5647 .bdrv_save_vmstate = qcow2_save_vmstate, 5648 .bdrv_load_vmstate = qcow2_load_vmstate, 5649 5650 .supports_backing = true, 5651 .bdrv_change_backing_file = qcow2_change_backing_file, 5652 5653 .bdrv_refresh_limits = qcow2_refresh_limits, 5654 .bdrv_co_invalidate_cache = qcow2_co_invalidate_cache, 5655 .bdrv_inactivate = qcow2_inactivate, 5656 5657 .create_opts = &qcow2_create_opts, 5658 .strong_runtime_opts = qcow2_strong_runtime_opts, 5659 .mutable_opts = mutable_opts, 5660 .bdrv_co_check = qcow2_co_check, 5661 .bdrv_amend_options = qcow2_amend_options, 5662 5663 .bdrv_detach_aio_context = qcow2_detach_aio_context, 5664 .bdrv_attach_aio_context = qcow2_attach_aio_context, 5665 5666 .bdrv_co_can_store_new_dirty_bitmap = qcow2_co_can_store_new_dirty_bitmap, 5667 .bdrv_co_remove_persistent_dirty_bitmap = 5668 qcow2_co_remove_persistent_dirty_bitmap, 5669 }; 5670 5671 static void bdrv_qcow2_init(void) 5672 { 5673 bdrv_register(&bdrv_qcow2); 5674 } 5675 5676 block_init(bdrv_qcow2_init); 5677