/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "block_int.h"
#include "module.h"
#include <zlib.h>
#include "aes.h"
#include "block/qcow2.h"
#include "qemu-error.h"
#include "qerror.h"
#include "trace.h"

/*
  Differences with QCOW:

  - Support for multiple incremental snapshots.
  - Memory management by reference counts.
  - Clusters which have a reference count of one have the bit
    QCOW_OFLAG_COPIED to optimize write performance.
  - The size of compressed clusters is stored in sectors to reduce bit usage
    in the cluster offsets.
  - Support for storing additional data (such as the VM state) in the
    snapshots.
  - If a backing store is used, the cluster size is not constrained
    (could be backported to QCOW).
  - L2 tables always have a size of one cluster.
*/
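
/*
 * Header extensions, as stored after the fixed qcow2 header: each extension
 * is a big-endian (magic, length) pair followed by 'length' bytes of data,
 * padded so that the next extension starts at an 8-byte boundary.  A magic
 * of 0 (QCOW2_EXT_MAGIC_END) terminates the list.  See
 * qcow2_read_extensions() and header_ext_add() below.
 */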

typedef struct {
    uint32_t magic;
    uint32_t len;
} QCowExtension;

#define QCOW2_EXT_MAGIC_END 0
#define QCOW2_EXT_MAGIC_BACKING_FORMAT 0xE2792ACA
#define QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857

static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    const QCowHeader *cow_header = (const void *)buf;

    if (buf_size >= sizeof(QCowHeader) &&
        be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
        be32_to_cpu(cow_header->version) >= 2)
        return 100;
    else
        return 0;
}


/*
 * Read qcow2 extensions and fill bs.
 * Start reading from start_offset; finish reading upon a magic of value 0 or
 * when end_offset is reached.
 * Unknown magics are skipped (future extensions this version knows nothing
 * about).
 * Return 0 upon success, non-0 otherwise.
 */
static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
                                 uint64_t end_offset, void **p_feature_table)
{
    BDRVQcowState *s = bs->opaque;
    QCowExtension ext;
    uint64_t offset;
    int ret;

#ifdef DEBUG_EXT
    printf("qcow2_read_extensions: start=%" PRIu64 " end=%" PRIu64 "\n",
           start_offset, end_offset);
#endif
    offset = start_offset;
    while (offset < end_offset) {

#ifdef DEBUG_EXT
        /* Sanity check */
        if (offset > s->cluster_size)
            printf("qcow2_read_extension: suspicious offset %" PRIu64 "\n",
                   offset);

        printf("attempting to read extended header at offset %" PRIu64 "\n",
               offset);
#endif

        if (bdrv_pread(bs->file, offset, &ext, sizeof(ext)) != sizeof(ext)) {
            fprintf(stderr, "qcow2_read_extension: ERROR: "
                    "pread fail from offset %" PRIu64 "\n",
                    offset);
            return 1;
        }
        be32_to_cpus(&ext.magic);
        be32_to_cpus(&ext.len);
        offset += sizeof(ext);
#ifdef DEBUG_EXT
        printf("ext.magic = 0x%x\n", ext.magic);
#endif
        if (ext.len > end_offset - offset) {
            error_report("Header extension too large");
            return -EINVAL;
        }

        switch (ext.magic) {
        case QCOW2_EXT_MAGIC_END:
            return 0;

        case QCOW2_EXT_MAGIC_BACKING_FORMAT:
            if (ext.len >= sizeof(bs->backing_format)) {
                fprintf(stderr, "ERROR: ext_backing_format: len=%u too large"
                        " (>=%zu)\n",
                        ext.len, sizeof(bs->backing_format));
                return 2;
            }
            if (bdrv_pread(bs->file, offset, bs->backing_format,
                           ext.len) != ext.len)
                return 3;
            bs->backing_format[ext.len] = '\0';
#ifdef DEBUG_EXT
            printf("Qcow2: Got format extension %s\n", bs->backing_format);
#endif
            break;

        case QCOW2_EXT_MAGIC_FEATURE_TABLE:
            if (p_feature_table != NULL) {
                void* feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
                ret = bdrv_pread(bs->file, offset, feature_table, ext.len);
                if (ret < 0) {
                    return ret;
                }

                *p_feature_table = feature_table;
            }
            break;

        default:
            /* unknown magic - save it in case we need to rewrite the header */
            {
                Qcow2UnknownHeaderExtension *uext;

                uext = g_malloc0(sizeof(*uext) + ext.len);
                uext->magic = ext.magic;
                uext->len = ext.len;
                QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next);

                ret = bdrv_pread(bs->file, offset, uext->data, uext->len);
                if (ret < 0) {
                    return ret;
                }
            }
            break;
        }

        offset += ((ext.len + 7) & ~7);
    }

    return 0;
}

static void cleanup_unknown_header_ext(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2UnknownHeaderExtension *uext, *next;

    QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) {
        QLIST_REMOVE(uext, next);
        g_free(uext);
    }
}

static void GCC_FMT_ATTR(2, 3) report_unsupported(BlockDriverState *bs,
    const char *fmt, ...)
{
    char msg[64];
    va_list ap;

    va_start(ap, fmt);
    vsnprintf(msg, sizeof(msg), fmt, ap);
    va_end(ap);

    qerror_report(QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
        bs->device_name, "qcow2", msg);
}

static void report_unsupported_feature(BlockDriverState *bs,
    Qcow2Feature *table, uint64_t mask)
{
    while (table && table->name[0] != '\0') {
        if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) {
            if (mask & (1ULL << table->bit)) {
                report_unsupported(bs, "%.46s", table->name);
                mask &= ~(1ULL << table->bit);
            }
        }
        table++;
    }

    if (mask) {
        report_unsupported(bs, "Unknown incompatible feature: %" PRIx64, mask);
    }
}

/*
 * Sets the dirty bit and flushes afterwards if necessary.
 *
 * The incompatible_features bit is only set if the image file header was
 * updated successfully. Therefore it is not required to check the return
 * value of this function.
 */
int qcow2_mark_dirty(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t val;
    int ret;

    assert(s->qcow_version >= 3);

    if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
        return 0; /* already dirty */
    }

    val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY);
    ret = bdrv_pwrite(bs->file, offsetof(QCowHeader, incompatible_features),
                      &val, sizeof(val));
    if (ret < 0) {
        return ret;
    }
    ret = bdrv_flush(bs->file);
    if (ret < 0) {
        return ret;
    }

    /* Only treat image as dirty if the header was updated successfully */
    s->incompatible_features |= QCOW2_INCOMPAT_DIRTY;
    return 0;
}
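
/*
 * Note on the dirty flag: it is an incompatible feature bit, so images that
 * carry it can only be opened by implementations that know how to repair
 * them.  It works together with the "lazy refcounts" compatible feature:
 * while the flag is set, refcount updates may be postponed, and qcow2_open()
 * rebuilds the refcounts via qcow2_check() if the image was not cleanly
 * closed.
 */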

/*
 * Clears the dirty bit and flushes before if necessary. Only call this
 * function when there are no pending requests, it does not guard against
 * concurrent requests dirtying the image.
 */
static int qcow2_mark_clean(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;

    if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
        int ret = bdrv_flush(bs);
        if (ret < 0) {
            return ret;
        }

        s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY;
        return qcow2_update_header(bs);
    }
    return 0;
}

static int qcow2_check(BlockDriverState *bs, BdrvCheckResult *result,
                       BdrvCheckMode fix)
{
    int ret = qcow2_check_refcounts(bs, result, fix);
    if (ret < 0) {
        return ret;
    }

    if (fix && result->check_errors == 0 && result->corruptions == 0) {
        return qcow2_mark_clean(bs);
    }
    return ret;
}

static int qcow2_open(BlockDriverState *bs, int flags)
{
    BDRVQcowState *s = bs->opaque;
    int len, i, ret = 0;
    QCowHeader header;
    uint64_t ext_end;

    ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
    if (ret < 0) {
        goto fail;
    }
    be32_to_cpus(&header.magic);
    be32_to_cpus(&header.version);
    be64_to_cpus(&header.backing_file_offset);
    be32_to_cpus(&header.backing_file_size);
    be64_to_cpus(&header.size);
    be32_to_cpus(&header.cluster_bits);
    be32_to_cpus(&header.crypt_method);
    be64_to_cpus(&header.l1_table_offset);
    be32_to_cpus(&header.l1_size);
    be64_to_cpus(&header.refcount_table_offset);
    be32_to_cpus(&header.refcount_table_clusters);
    be64_to_cpus(&header.snapshots_offset);
    be32_to_cpus(&header.nb_snapshots);

    if (header.magic != QCOW_MAGIC) {
        ret = -EINVAL;
        goto fail;
    }
    if (header.version < 2 || header.version > 3) {
        report_unsupported(bs, "QCOW version %d", header.version);
        ret = -ENOTSUP;
        goto fail;
    }

    s->qcow_version = header.version;

    /* Initialise version 3 header fields */
    if (header.version == 2) {
        header.incompatible_features = 0;
        header.compatible_features = 0;
        header.autoclear_features = 0;
        header.refcount_order = 4;
        header.header_length = 72;
    } else {
        be64_to_cpus(&header.incompatible_features);
        be64_to_cpus(&header.compatible_features);
        be64_to_cpus(&header.autoclear_features);
        be32_to_cpus(&header.refcount_order);
        be32_to_cpus(&header.header_length);
    }

    if (header.header_length > sizeof(header)) {
        s->unknown_header_fields_size = header.header_length - sizeof(header);
        s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);
        ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields,
                         s->unknown_header_fields_size);
        if (ret < 0) {
            goto fail;
        }
    }

    if (header.backing_file_offset) {
        ext_end = header.backing_file_offset;
    } else {
        ext_end = 1 << header.cluster_bits;
    }

    /* Handle feature bits */
    s->incompatible_features = header.incompatible_features;
    s->compatible_features = header.compatible_features;
    s->autoclear_features = header.autoclear_features;

    if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) {
        void *feature_table = NULL;
        qcow2_read_extensions(bs, header.header_length, ext_end,
                              &feature_table);
        report_unsupported_feature(bs, feature_table,
                                   s->incompatible_features &
                                   ~QCOW2_INCOMPAT_MASK);
        ret = -ENOTSUP;
        goto fail;
    }

    /* Check support for various header values */
    if (header.refcount_order != 4) {
        report_unsupported(bs, "%d bit reference counts",
                           1 << header.refcount_order);
        ret = -ENOTSUP;
        goto fail;
    }

    if (header.cluster_bits < MIN_CLUSTER_BITS ||
        header.cluster_bits > MAX_CLUSTER_BITS) {
        ret = -EINVAL;
        goto fail;
    }
    if (header.crypt_method > QCOW_CRYPT_AES) {
        ret = -EINVAL;
        goto fail;
    }
    s->crypt_method_header = header.crypt_method;
    if (s->crypt_method_header) {
        bs->encrypted = 1;
    }
    s->cluster_bits = header.cluster_bits;
    s->cluster_size = 1 << s->cluster_bits;
    s->cluster_sectors = 1 << (s->cluster_bits - 9);
    s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
    s->l2_size = 1 << s->l2_bits;
    bs->total_sectors = header.size / 512;
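    /*
     * Compressed cluster L2 entries split the descriptor bits (below the
     * flags) into a host offset in the low csize_shift bits and the size of
     * the compressed data, expressed in 512-byte sectors, above it.
     * csize_mask and cluster_offset_mask extract those two fields; this is
     * the "size stored in sectors" scheme mentioned in the header comment
     * above.
     */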
    s->csize_shift = (62 - (s->cluster_bits - 8));
    s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
    s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
    s->refcount_table_offset = header.refcount_table_offset;
    s->refcount_table_size =
        header.refcount_table_clusters << (s->cluster_bits - 3);

    s->snapshots_offset = header.snapshots_offset;
    s->nb_snapshots = header.nb_snapshots;

    /* read the level 1 table */
    s->l1_size = header.l1_size;
    s->l1_vm_state_index = size_to_l1(s, header.size);
    /* the L1 table must contain at least enough entries to put
       header.size bytes */
    if (s->l1_size < s->l1_vm_state_index) {
        ret = -EINVAL;
        goto fail;
    }
    s->l1_table_offset = header.l1_table_offset;
    if (s->l1_size > 0) {
        s->l1_table = g_malloc0(
            align_offset(s->l1_size * sizeof(uint64_t), 512));
        ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
                         s->l1_size * sizeof(uint64_t));
        if (ret < 0) {
            goto fail;
        }
        for (i = 0; i < s->l1_size; i++) {
            be64_to_cpus(&s->l1_table[i]);
        }
    }

    /* alloc L2 table/refcount block cache */
    s->l2_table_cache = qcow2_cache_create(bs, L2_CACHE_SIZE);
    s->refcount_block_cache = qcow2_cache_create(bs, REFCOUNT_CACHE_SIZE);

    s->cluster_cache = g_malloc(s->cluster_size);
    /* one more sector for decompressed data alignment */
    s->cluster_data = qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
                                      + 512);
    s->cluster_cache_offset = -1;
    s->flags = flags;

    ret = qcow2_refcount_init(bs);
    if (ret != 0) {
        goto fail;
    }

    QLIST_INIT(&s->cluster_allocs);

    /* read qcow2 extensions */
    if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL)) {
        ret = -EINVAL;
        goto fail;
    }

    /* read the backing file name */
    if (header.backing_file_offset != 0) {
        len = header.backing_file_size;
        if (len > 1023) {
            len = 1023;
        }
        ret = bdrv_pread(bs->file, header.backing_file_offset,
                         bs->backing_file, len);
        if (ret < 0) {
            goto fail;
        }
        bs->backing_file[len] = '\0';
    }

    ret = qcow2_read_snapshots(bs);
    if (ret < 0) {
        goto fail;
    }

    /* Clear unknown autoclear feature bits */
    if (!bs->read_only && s->autoclear_features != 0) {
        s->autoclear_features = 0;
        ret = qcow2_update_header(bs);
        if (ret < 0) {
            goto fail;
        }
    }

    /* Initialise locks */
    qemu_co_mutex_init(&s->lock);

    /* Repair image if dirty */
    if (!(flags & BDRV_O_CHECK) && !bs->read_only &&
        (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) {
        BdrvCheckResult result = {0};

        ret = qcow2_check(bs, &result, BDRV_FIX_ERRORS);
        if (ret < 0) {
            goto fail;
        }
    }

#ifdef DEBUG_ALLOC
    {
        BdrvCheckResult result = {0};
        qcow2_check_refcounts(bs, &result, 0);
    }
#endif
    return ret;

 fail:
    g_free(s->unknown_header_fields);
    cleanup_unknown_header_ext(bs);
    qcow2_free_snapshots(bs);
    qcow2_refcount_close(bs);
    g_free(s->l1_table);
    if (s->l2_table_cache) {
        qcow2_cache_destroy(bs, s->l2_table_cache);
    }
    g_free(s->cluster_cache);
    qemu_vfree(s->cluster_data);
    return ret;
}

static int qcow2_set_key(BlockDriverState *bs, const char *key)
{
    BDRVQcowState *s = bs->opaque;
    uint8_t keybuf[16];
    int len, i;

    memset(keybuf, 0, 16);
    len = strlen(key);
    if (len > 16)
        len = 16;
    /* XXX: we could compress the chars to 7 bits to increase
       entropy */
    for (i = 0; i < len; i++) {
        keybuf[i] = key[i];
    }
    s->crypt_method = s->crypt_method_header;

    if (AES_set_encrypt_key(keybuf, 128, &s->aes_encrypt_key) != 0)
        return -1;
    if (AES_set_decrypt_key(keybuf, 128, &s->aes_decrypt_key) != 0)
        return -1;
#if 0
    /* test */
    {
        uint8_t in[16];
        uint8_t out[16];
        uint8_t tmp[16];
        for (i = 0; i < 16; i++)
            in[i] = i;
        AES_encrypt(in, tmp, &s->aes_encrypt_key);
        AES_decrypt(tmp, out, &s->aes_decrypt_key);
        for (i = 0; i < 16; i++)
            printf(" %02x", tmp[i]);
        printf("\n");
        for (i = 0; i < 16; i++)
            printf(" %02x", out[i]);
        printf("\n");
    }
#endif
    return 0;
}

/* We have nothing to do for QCOW2 reopen, stubs just return
 * success */
static int qcow2_reopen_prepare(BDRVReopenState *state,
                                BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static int coroutine_fn qcow2_co_is_allocated(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, int *pnum)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t cluster_offset;
    int ret;

    *pnum = nb_sectors;
    /* FIXME We can get errors here, but the bdrv_co_is_allocated interface
     * can't pass them on today */
    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_get_cluster_offset(bs, sector_num << 9, pnum, &cluster_offset);
    qemu_co_mutex_unlock(&s->lock);
    if (ret < 0) {
        *pnum = 0;
    }

    return (cluster_offset != 0);
}

/* handle reading after the end of the backing file */
int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov,
                        int64_t sector_num, int nb_sectors)
{
    int n1;
    if ((sector_num + nb_sectors) <= bs->total_sectors)
        return nb_sectors;
    if (sector_num >= bs->total_sectors)
        n1 = 0;
    else
        n1 = bs->total_sectors - sector_num;

    qemu_iovec_memset(qiov, 512 * n1, 0, 512 * (nb_sectors - n1));

    return n1;
}

static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
                          int remaining_sectors, QEMUIOVector *qiov)
{
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster, n1;
    int ret;
    int cur_nr_sectors; /* number of sectors in current iteration */
    uint64_t cluster_offset = 0;
    uint64_t bytes_done = 0;
    QEMUIOVector hd_qiov;
    uint8_t *cluster_data = NULL;

    qemu_iovec_init(&hd_qiov, qiov->niov);

    qemu_co_mutex_lock(&s->lock);

    while (remaining_sectors != 0) {

        /* prepare next request */
        cur_nr_sectors = remaining_sectors;
        if (s->crypt_method) {
            cur_nr_sectors = MIN(cur_nr_sectors,
                QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors);
        }

        ret = qcow2_get_cluster_offset(bs, sector_num << 9,
            &cur_nr_sectors, &cluster_offset);
        if (ret < 0) {
            goto fail;
        }

        index_in_cluster = sector_num & (s->cluster_sectors - 1);

        qemu_iovec_reset(&hd_qiov);
        qemu_iovec_concat(&hd_qiov, qiov, bytes_done,
            cur_nr_sectors * 512);
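
        /*
         * qcow2_get_cluster_offset() told us what kind of cluster this is:
         * unallocated clusters fall through to the backing file (or read as
         * zeroes), zero clusters (v3 only) read as zeroes, compressed
         * clusters go through the decompression cache, and normal clusters
         * are read from the image file directly (decrypting afterwards if
         * the image is encrypted).
         */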
        switch (ret) {
        case QCOW2_CLUSTER_UNALLOCATED:

            if (bs->backing_hd) {
                /* read from the base image */
                n1 = qcow2_backing_read1(bs->backing_hd, &hd_qiov,
                    sector_num, cur_nr_sectors);
                if (n1 > 0) {
                    BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
                    qemu_co_mutex_unlock(&s->lock);
                    ret = bdrv_co_readv(bs->backing_hd, sector_num,
                                        n1, &hd_qiov);
                    qemu_co_mutex_lock(&s->lock);
                    if (ret < 0) {
                        goto fail;
                    }
                }
            } else {
                /* Note: in this case, no need to wait */
                qemu_iovec_memset(&hd_qiov, 0, 0, 512 * cur_nr_sectors);
            }
            break;

        case QCOW2_CLUSTER_ZERO:
            if (s->qcow_version < 3) {
                ret = -EIO;
                goto fail;
            }
            qemu_iovec_memset(&hd_qiov, 0, 0, 512 * cur_nr_sectors);
            break;

        case QCOW2_CLUSTER_COMPRESSED:
            /* add AIO support for compressed blocks ? */
            ret = qcow2_decompress_cluster(bs, cluster_offset);
            if (ret < 0) {
                goto fail;
            }

            qemu_iovec_from_buf(&hd_qiov, 0,
                s->cluster_cache + index_in_cluster * 512,
                512 * cur_nr_sectors);
            break;

        case QCOW2_CLUSTER_NORMAL:
            if ((cluster_offset & 511) != 0) {
                ret = -EIO;
                goto fail;
            }

            if (s->crypt_method) {
                /*
                 * For encrypted images, read everything into a temporary
                 * contiguous buffer on which the AES functions can work.
                 */
                if (!cluster_data) {
                    cluster_data =
                        qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
                }

                assert(cur_nr_sectors <=
                    QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors);
                qemu_iovec_reset(&hd_qiov);
                qemu_iovec_add(&hd_qiov, cluster_data,
                    512 * cur_nr_sectors);
            }

            BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
            qemu_co_mutex_unlock(&s->lock);
            ret = bdrv_co_readv(bs->file,
                                (cluster_offset >> 9) + index_in_cluster,
                                cur_nr_sectors, &hd_qiov);
            qemu_co_mutex_lock(&s->lock);
            if (ret < 0) {
                goto fail;
            }
            if (s->crypt_method) {
                qcow2_encrypt_sectors(s, sector_num, cluster_data,
                    cluster_data, cur_nr_sectors, 0, &s->aes_decrypt_key);
                qemu_iovec_from_buf(qiov, bytes_done,
                    cluster_data, 512 * cur_nr_sectors);
            }
            break;

        default:
            g_assert_not_reached();
            ret = -EIO;
            goto fail;
        }

        remaining_sectors -= cur_nr_sectors;
        sector_num += cur_nr_sectors;
        bytes_done += cur_nr_sectors * 512;
    }
    ret = 0;

fail:
    qemu_co_mutex_unlock(&s->lock);

    qemu_iovec_destroy(&hd_qiov);
    qemu_vfree(cluster_data);

    return ret;
}

static coroutine_fn int qcow2_co_writev(BlockDriverState *bs,
                           int64_t sector_num,
                           int remaining_sectors,
                           QEMUIOVector *qiov)
{
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster;
    int n_end;
    int ret;
    int cur_nr_sectors; /* number of sectors in current iteration */
    uint64_t cluster_offset;
    QEMUIOVector hd_qiov;
    uint64_t bytes_done = 0;
    uint8_t *cluster_data = NULL;
    QCowL2Meta *l2meta;

    trace_qcow2_writev_start_req(qemu_coroutine_self(), sector_num,
                                 remaining_sectors);

    qemu_iovec_init(&hd_qiov, qiov->niov);

    s->cluster_cache_offset = -1; /* disable compressed cache */

    qemu_co_mutex_lock(&s->lock);

    while (remaining_sectors != 0) {

        l2meta = NULL;

        trace_qcow2_writev_start_part(qemu_coroutine_self());
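
        /*
         * Each iteration: find or allocate the host clusters backing this
         * part of the request, bounce-encrypt the data if the image is
         * encrypted, write the data, and finally (for newly allocated
         * clusters, described by l2meta) update the L2 table and wake up
         * requests that were waiting on this allocation.
         */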
        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        n_end = index_in_cluster + remaining_sectors;
        if (s->crypt_method &&
            n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors) {
            n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
        }

        ret = qcow2_alloc_cluster_offset(bs, sector_num << 9,
            index_in_cluster, n_end, &cur_nr_sectors, &cluster_offset, &l2meta);
        if (ret < 0) {
            goto fail;
        }

        assert((cluster_offset & 511) == 0);

        qemu_iovec_reset(&hd_qiov);
        qemu_iovec_concat(&hd_qiov, qiov, bytes_done,
            cur_nr_sectors * 512);

        if (s->crypt_method) {
            if (!cluster_data) {
                cluster_data = qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS *
                                                   s->cluster_size);
            }

            assert(hd_qiov.size <=
                   QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
            qemu_iovec_to_buf(&hd_qiov, 0, cluster_data, hd_qiov.size);

            qcow2_encrypt_sectors(s, sector_num, cluster_data,
                cluster_data, cur_nr_sectors, 1, &s->aes_encrypt_key);

            qemu_iovec_reset(&hd_qiov);
            qemu_iovec_add(&hd_qiov, cluster_data,
                cur_nr_sectors * 512);
        }

        qemu_co_mutex_unlock(&s->lock);
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        trace_qcow2_writev_data(qemu_coroutine_self(),
                                (cluster_offset >> 9) + index_in_cluster);
        ret = bdrv_co_writev(bs->file,
                             (cluster_offset >> 9) + index_in_cluster,
                             cur_nr_sectors, &hd_qiov);
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0) {
            goto fail;
        }

        if (l2meta != NULL) {
            ret = qcow2_alloc_cluster_link_l2(bs, l2meta);
            if (ret < 0) {
                goto fail;
            }

            /* Take the request off the list of running requests */
            if (l2meta->nb_clusters != 0) {
                QLIST_REMOVE(l2meta, next_in_flight);
            }

            qemu_co_mutex_unlock(&s->lock);
            qemu_co_queue_restart_all(&l2meta->dependent_requests);
            qemu_co_mutex_lock(&s->lock);

            g_free(l2meta);
            l2meta = NULL;
        }

        remaining_sectors -= cur_nr_sectors;
        sector_num += cur_nr_sectors;
        bytes_done += cur_nr_sectors * 512;
        trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_nr_sectors);
    }
    ret = 0;

fail:
    qemu_co_mutex_unlock(&s->lock);

    if (l2meta != NULL) {
        if (l2meta->nb_clusters != 0) {
            QLIST_REMOVE(l2meta, next_in_flight);
        }
        qemu_co_queue_restart_all(&l2meta->dependent_requests);
        g_free(l2meta);
    }

    qemu_iovec_destroy(&hd_qiov);
    qemu_vfree(cluster_data);
    trace_qcow2_writev_done_req(qemu_coroutine_self(), ret);

    return ret;
}

static void qcow2_close(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    g_free(s->l1_table);

    qcow2_cache_flush(bs, s->l2_table_cache);
    qcow2_cache_flush(bs, s->refcount_block_cache);

    qcow2_mark_clean(bs);

    qcow2_cache_destroy(bs, s->l2_table_cache);
    qcow2_cache_destroy(bs, s->refcount_block_cache);

    g_free(s->unknown_header_fields);
    cleanup_unknown_header_ext(bs);

    g_free(s->cluster_cache);
    qemu_vfree(s->cluster_data);
    qcow2_refcount_close(bs);
    qcow2_free_snapshots(bs);
}
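
/*
 * Drops all cached metadata by closing and reopening the image file.  The
 * encryption keys are not stored in the image, so they (and the active
 * crypt method) are saved across the reopen and restored afterwards.
 */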
static void qcow2_invalidate_cache(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int flags = s->flags;
    AES_KEY aes_encrypt_key;
    AES_KEY aes_decrypt_key;
    uint32_t crypt_method = 0;

    /*
     * Backing files are read-only which makes all of their metadata immutable,
     * that means we don't have to worry about reopening them here.
     */

    if (s->crypt_method) {
        crypt_method = s->crypt_method;
        memcpy(&aes_encrypt_key, &s->aes_encrypt_key, sizeof(aes_encrypt_key));
        memcpy(&aes_decrypt_key, &s->aes_decrypt_key, sizeof(aes_decrypt_key));
    }

    qcow2_close(bs);

    memset(s, 0, sizeof(BDRVQcowState));
    qcow2_open(bs, flags);

    if (crypt_method) {
        s->crypt_method = crypt_method;
        memcpy(&s->aes_encrypt_key, &aes_encrypt_key, sizeof(aes_encrypt_key));
        memcpy(&s->aes_decrypt_key, &aes_decrypt_key, sizeof(aes_decrypt_key));
    }
}

static int header_ext_add(char *buf, uint32_t magic, const void *s,
    size_t len, size_t buflen)
{
    QCowExtension *ext_backing_fmt = (QCowExtension*) buf;
    size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7);

    if (buflen < ext_len) {
        return -ENOSPC;
    }

    *ext_backing_fmt = (QCowExtension) {
        .magic = cpu_to_be32(magic),
        .len = cpu_to_be32(len),
    };
    memcpy(buf + sizeof(QCowExtension), s, len);

    return ext_len;
}

/*
 * Updates the qcow2 header, including the variable length parts of it, i.e.
 * the backing file name and all extensions. qcow2 was not designed to allow
 * such changes, so if we run out of space (we can only use the first cluster)
 * this function may fail.
 *
 * Returns 0 on success, -errno in error cases.
 */
int qcow2_update_header(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    QCowHeader *header;
    char *buf;
    size_t buflen = s->cluster_size;
    int ret;
    uint64_t total_size;
    uint32_t refcount_table_clusters;
    size_t header_length;
    Qcow2UnknownHeaderExtension *uext;

    buf = qemu_blockalign(bs, buflen);

    /* Header structure */
    header = (QCowHeader*) buf;

    if (buflen < sizeof(*header)) {
        ret = -ENOSPC;
        goto fail;
    }

    header_length = sizeof(*header) + s->unknown_header_fields_size;
    total_size = bs->total_sectors * BDRV_SECTOR_SIZE;
    refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3);

    *header = (QCowHeader) {
        /* Version 2 fields */
        .magic = cpu_to_be32(QCOW_MAGIC),
        .version = cpu_to_be32(s->qcow_version),
        .backing_file_offset = 0,
        .backing_file_size = 0,
        .cluster_bits = cpu_to_be32(s->cluster_bits),
        .size = cpu_to_be64(total_size),
        .crypt_method = cpu_to_be32(s->crypt_method_header),
        .l1_size = cpu_to_be32(s->l1_size),
        .l1_table_offset = cpu_to_be64(s->l1_table_offset),
        .refcount_table_offset = cpu_to_be64(s->refcount_table_offset),
        .refcount_table_clusters = cpu_to_be32(refcount_table_clusters),
        .nb_snapshots = cpu_to_be32(s->nb_snapshots),
        .snapshots_offset = cpu_to_be64(s->snapshots_offset),

        /* Version 3 fields */
        .incompatible_features = cpu_to_be64(s->incompatible_features),
        .compatible_features = cpu_to_be64(s->compatible_features),
        .autoclear_features = cpu_to_be64(s->autoclear_features),
        .refcount_order = cpu_to_be32(3 + REFCOUNT_SHIFT),
        .header_length = cpu_to_be32(header_length),
    };

    /* For older versions, write a shorter header */
    switch (s->qcow_version) {
    case 2:
        ret = offsetof(QCowHeader, incompatible_features);
        break;
    case 3:
        ret = sizeof(*header);
        break;
    default:
        ret = -EINVAL;
        goto fail;
    }

    buf += ret;
    buflen -= ret;
    memset(buf, 0, buflen);
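
    /*
     * The variable-length part of the header is rebuilt from scratch after
     * the fixed fields: any unknown header fields we preserved at open time,
     * then the header extensions (backing format, feature name table,
     * unknown extensions, end marker), and finally the backing file name.
     */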

    /* Preserve any unknown field in the header */
    if (s->unknown_header_fields_size) {
        if (buflen < s->unknown_header_fields_size) {
            ret = -ENOSPC;
            goto fail;
        }

        memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size);
        buf += s->unknown_header_fields_size;
        buflen -= s->unknown_header_fields_size;
    }

    /* Backing file format header extension */
    if (*bs->backing_format) {
        ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT,
                             bs->backing_format, strlen(bs->backing_format),
                             buflen);
        if (ret < 0) {
            goto fail;
        }

        buf += ret;
        buflen -= ret;
    }
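
    /*
     * The feature name table below maps feature bits to human-readable
     * names, so that an implementation that does not know one of the bits
     * can still report it by name (see report_unsupported_feature()).
     */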
    /* Feature table */
    Qcow2Feature features[] = {
        {
            .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
            .bit = QCOW2_INCOMPAT_DIRTY_BITNR,
            .name = "dirty bit",
        },
        {
            .type = QCOW2_FEAT_TYPE_COMPATIBLE,
            .bit = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR,
            .name = "lazy refcounts",
        },
    };

    ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE,
                         features, sizeof(features), buflen);
    if (ret < 0) {
        goto fail;
    }
    buf += ret;
    buflen -= ret;

    /* Keep unknown header extensions */
    QLIST_FOREACH(uext, &s->unknown_header_ext, next) {
        ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen);
        if (ret < 0) {
            goto fail;
        }

        buf += ret;
        buflen -= ret;
    }

    /* End of header extensions */
    ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen);
    if (ret < 0) {
        goto fail;
    }

    buf += ret;
    buflen -= ret;

    /* Backing file name */
    if (*bs->backing_file) {
        size_t backing_file_len = strlen(bs->backing_file);

        if (buflen < backing_file_len) {
            ret = -ENOSPC;
            goto fail;
        }

        /* Using strncpy is ok here, since buf is not NUL-terminated. */
        strncpy(buf, bs->backing_file, buflen);

        header->backing_file_offset = cpu_to_be64(buf - ((char*) header));
        header->backing_file_size = cpu_to_be32(backing_file_len);
    }

    /* Write the new header */
    ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size);
    if (ret < 0) {
        goto fail;
    }

    ret = 0;
fail:
    qemu_vfree(header);
    return ret;
}

static int qcow2_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
    pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");

    return qcow2_update_header(bs);
}
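
/*
 * Preallocates metadata for the whole virtual disk: walks the image in
 * chunks, allocating clusters and linking them into the L2 tables.  The data
 * clusters themselves are not written, except for one zero sector at the end
 * that extends the image file to cover the last allocated cluster.
 */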
static int preallocate(BlockDriverState *bs)
{
    uint64_t nb_sectors;
    uint64_t offset;
    uint64_t host_offset = 0;
    int num;
    int ret;
    QCowL2Meta *meta;

    nb_sectors = bdrv_getlength(bs) >> 9;
    offset = 0;

    while (nb_sectors) {
        num = MIN(nb_sectors, INT_MAX >> 9);
        ret = qcow2_alloc_cluster_offset(bs, offset, 0, num, &num,
                                         &host_offset, &meta);
        if (ret < 0) {
            return ret;
        }

        ret = qcow2_alloc_cluster_link_l2(bs, meta);
        if (ret < 0) {
            qcow2_free_any_clusters(bs, meta->alloc_offset, meta->nb_clusters);
            return ret;
        }

        /* There are no dependent requests, but we need to remove our request
         * from the list of in-flight requests */
        if (meta != NULL) {
            QLIST_REMOVE(meta, next_in_flight);
        }

        /* TODO Preallocate data if requested */

        nb_sectors -= num;
        offset += num << 9;
    }

    /*
     * It is expected that the image file is large enough to actually contain
     * all of the allocated clusters (otherwise we get failing reads after
     * EOF). Extend the image to the last allocated sector.
     */
    if (host_offset != 0) {
        uint8_t buf[512];
        memset(buf, 0, 512);
        ret = bdrv_write(bs->file, (host_offset >> 9) + num - 1, buf, 1);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

static int qcow2_create2(const char *filename, int64_t total_size,
                         const char *backing_file, const char *backing_format,
                         int flags, size_t cluster_size, int prealloc,
                         QEMUOptionParameter *options, int version)
{
    /* Calculate cluster_bits */
    int cluster_bits;
    cluster_bits = ffs(cluster_size) - 1;
    if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS ||
        (1 << cluster_bits) != cluster_size)
    {
        error_report(
            "Cluster size must be a power of two between %d and %dk",
            1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10));
        return -EINVAL;
    }

    /*
     * Open the image file and write a minimal qcow2 header.
     *
     * We keep things simple and start with a zero-sized image. We also
     * do without refcount blocks or an L1 table for now. We'll fix the
     * inconsistency later.
     *
     * We do need a refcount table because growing the refcount table means
     * allocating two new refcount blocks - the second of which would be at
     * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file
     * size for any qcow2 image.
     */
    BlockDriverState* bs;
    QCowHeader header;
    uint8_t* refcount_table;
    int ret;

    ret = bdrv_create_file(filename, options);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_file_open(&bs, filename, BDRV_O_RDWR);
    if (ret < 0) {
        return ret;
    }

    /* Write the header */
    memset(&header, 0, sizeof(header));
    header.magic = cpu_to_be32(QCOW_MAGIC);
    header.version = cpu_to_be32(version);
    header.cluster_bits = cpu_to_be32(cluster_bits);
    header.size = cpu_to_be64(0);
    header.l1_table_offset = cpu_to_be64(0);
    header.l1_size = cpu_to_be32(0);
    header.refcount_table_offset = cpu_to_be64(cluster_size);
    header.refcount_table_clusters = cpu_to_be32(1);
    header.refcount_order = cpu_to_be32(3 + REFCOUNT_SHIFT);
    header.header_length = cpu_to_be32(sizeof(header));

    if (flags & BLOCK_FLAG_ENCRYPT) {
        header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES);
    } else {
        header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
    }

    if (flags & BLOCK_FLAG_LAZY_REFCOUNTS) {
        header.compatible_features |=
            cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS);
    }

    ret = bdrv_pwrite(bs, 0, &header, sizeof(header));
    if (ret < 0) {
        goto out;
    }

    /* Write an empty refcount table */
    refcount_table = g_malloc0(cluster_size);
    ret = bdrv_pwrite(bs, cluster_size, refcount_table, cluster_size);
    g_free(refcount_table);

    if (ret < 0) {
        goto out;
    }

    bdrv_close(bs);

    /*
     * And now open the image and make it consistent first (i.e. increase the
     * refcount of the cluster that is occupied by the header and the refcount
     * table)
     */
    BlockDriver* drv = bdrv_find_format("qcow2");
    assert(drv != NULL);
    ret = bdrv_open(bs, filename,
        BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH, drv);
    if (ret < 0) {
        goto out;
    }

    ret = qcow2_alloc_clusters(bs, 2 * cluster_size);
    if (ret < 0) {
        goto out;

    } else if (ret != 0) {
        error_report("Huh, first cluster in empty image is already in use?");
        abort();
    }

    /* Okay, now that we have a valid image, let's give it the right size */
    ret = bdrv_truncate(bs, total_size * BDRV_SECTOR_SIZE);
    if (ret < 0) {
        goto out;
    }

    /* Want a backing file? There you go. */
    if (backing_file) {
        ret = bdrv_change_backing_file(bs, backing_file, backing_format);
        if (ret < 0) {
            goto out;
        }
    }

    /* And if we're supposed to preallocate metadata, do that now */
    if (prealloc) {
        BDRVQcowState *s = bs->opaque;
        qemu_co_mutex_lock(&s->lock);
        ret = preallocate(bs);
        qemu_co_mutex_unlock(&s->lock);
        if (ret < 0) {
            goto out;
        }
    }

    ret = 0;
out:
    bdrv_delete(bs);
    return ret;
}

static int qcow2_create(const char *filename, QEMUOptionParameter *options)
{
    const char *backing_file = NULL;
    const char *backing_fmt = NULL;
    uint64_t sectors = 0;
    int flags = 0;
    size_t cluster_size = DEFAULT_CLUSTER_SIZE;
    int prealloc = 0;
    int version = 2;

    /* Read out options */
    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            sectors = options->value.n / 512;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
            backing_fmt = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_ENCRYPT)) {
            flags |= options->value.n ? BLOCK_FLAG_ENCRYPT : 0;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                cluster_size = options->value.n;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_PREALLOC)) {
            if (!options->value.s || !strcmp(options->value.s, "off")) {
                prealloc = 0;
            } else if (!strcmp(options->value.s, "metadata")) {
                prealloc = 1;
            } else {
                fprintf(stderr, "Invalid preallocation mode: '%s'\n",
                        options->value.s);
                return -EINVAL;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_COMPAT_LEVEL)) {
            if (!options->value.s || !strcmp(options->value.s, "0.10")) {
                version = 2;
            } else if (!strcmp(options->value.s, "1.1")) {
                version = 3;
            } else {
                fprintf(stderr, "Invalid compatibility level: '%s'\n",
                        options->value.s);
                return -EINVAL;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_LAZY_REFCOUNTS)) {
            flags |= options->value.n ? BLOCK_FLAG_LAZY_REFCOUNTS : 0;
        }
        options++;
    }

    if (backing_file && prealloc) {
        fprintf(stderr, "Backing file and preallocation cannot be used at "
                "the same time\n");
        return -EINVAL;
    }

    if (version < 3 && (flags & BLOCK_FLAG_LAZY_REFCOUNTS)) {
        fprintf(stderr, "Lazy refcounts only supported with compatibility "
                "level 1.1 and above (use compat=1.1 or greater)\n");
        return -EINVAL;
    }

    return qcow2_create2(filename, sectors, backing_file, backing_fmt, flags,
                         cluster_size, prealloc, options, version);
}

static int qcow2_make_empty(BlockDriverState *bs)
{
#if 0
    /* XXX: not correct */
    BDRVQcowState *s = bs->opaque;
    uint32_t l1_length = s->l1_size * sizeof(uint64_t);
    int ret;

    memset(s->l1_table, 0, l1_length);
    if (bdrv_pwrite(bs->file, s->l1_table_offset, s->l1_table, l1_length) < 0)
        return -1;
    ret = bdrv_truncate(bs->file, s->l1_table_offset + l1_length);
    if (ret < 0)
        return ret;

    l2_cache_reset(bs);
#endif
    return 0;
}

static coroutine_fn int qcow2_co_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors)
{
    int ret;
    BDRVQcowState *s = bs->opaque;

    /* Emulate misaligned zero writes */
    if (sector_num % s->cluster_sectors || nb_sectors % s->cluster_sectors) {
        return -ENOTSUP;
    }

    /* Whatever is left can use real zero clusters */
    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_zero_clusters(bs, sector_num << BDRV_SECTOR_BITS,
                              nb_sectors);
    qemu_co_mutex_unlock(&s->lock);

    return ret;
}

static coroutine_fn int qcow2_co_discard(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors)
{
    int ret;
    BDRVQcowState *s = bs->opaque;

    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_discard_clusters(bs, sector_num << BDRV_SECTOR_BITS,
                                 nb_sectors);
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}

static int qcow2_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, new_l1_size;

    if (offset & 511) {
        error_report("The new size must be a multiple of 512");
        return -EINVAL;
    }

    /* cannot proceed if image has snapshots */
    if (s->nb_snapshots) {
        error_report("Can't resize an image which has snapshots");
        return -ENOTSUP;
    }

    /* shrinking is currently not supported */
    if (offset < bs->total_sectors * 512) {
        error_report("qcow2 doesn't support shrinking images yet");
        return -ENOTSUP;
    }

    new_l1_size = size_to_l1(s, offset);
    ret = qcow2_grow_l1_table(bs, new_l1_size, true);
    if (ret < 0) {
        return ret;
    }

    /* write updated header.size */
    offset = cpu_to_be64(offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size),
                           &offset, sizeof(uint64_t));
    if (ret < 0) {
        return ret;
    }

    s->l1_vm_state_index = new_l1_size;
    return 0;
}
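
/*
 * Compressed writes work on exactly one cluster at a time: nb_sectors must
 * match the cluster size, and nb_sectors == 0 is a special case that merely
 * rounds the file size up to a sector boundary.  If deflate cannot shrink
 * the data, the cluster is written uncompressed instead.
 */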

/* XXX: put compressed sectors first, then all the cluster aligned
   tables to avoid losing bytes in alignment */
static int qcow2_write_compressed(BlockDriverState *bs, int64_t sector_num,
                                  const uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    z_stream strm;
    int ret, out_len;
    uint8_t *out_buf;
    uint64_t cluster_offset;

    if (nb_sectors == 0) {
        /* align end of file to a sector boundary to ease reading with
           sector based I/Os */
        cluster_offset = bdrv_getlength(bs->file);
        cluster_offset = (cluster_offset + 511) & ~511;
        bdrv_truncate(bs->file, cluster_offset);
        return 0;
    }

    if (nb_sectors != s->cluster_sectors)
        return -EINVAL;

    out_buf = g_malloc(s->cluster_size + (s->cluster_size / 1000) + 128);

    /* best compression, small window, no zlib header */
    memset(&strm, 0, sizeof(strm));
    ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION,
                       Z_DEFLATED, -12,
                       9, Z_DEFAULT_STRATEGY);
    if (ret != 0) {
        ret = -EINVAL;
        goto fail;
    }

    strm.avail_in = s->cluster_size;
    strm.next_in = (uint8_t *)buf;
    strm.avail_out = s->cluster_size;
    strm.next_out = out_buf;

    ret = deflate(&strm, Z_FINISH);
    if (ret != Z_STREAM_END && ret != Z_OK) {
        deflateEnd(&strm);
        ret = -EINVAL;
        goto fail;
    }
    out_len = strm.next_out - out_buf;

    deflateEnd(&strm);

    if (ret != Z_STREAM_END || out_len >= s->cluster_size) {
        /* could not compress: write normal cluster */
        ret = bdrv_write(bs, sector_num, buf, s->cluster_sectors);
        if (ret < 0) {
            goto fail;
        }
    } else {
        cluster_offset = qcow2_alloc_compressed_cluster_offset(bs,
            sector_num << 9, out_len);
        if (!cluster_offset) {
            ret = -EIO;
            goto fail;
        }
        cluster_offset &= s->cluster_offset_mask;
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED);
        ret = bdrv_pwrite(bs->file, cluster_offset, out_buf, out_len);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;
fail:
    g_free(out_buf);
    return ret;
}

static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        qemu_co_mutex_unlock(&s->lock);
        return ret;
    }

    if (qcow2_need_accurate_refcounts(s)) {
        ret = qcow2_cache_flush(bs, s->refcount_block_cache);
        if (ret < 0) {
            qemu_co_mutex_unlock(&s->lock);
            return ret;
        }
    }
    qemu_co_mutex_unlock(&s->lock);

    return 0;
}
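
/*
 * The VM state of internal snapshots is saved at guest offsets past the end
 * of the virtual disk, starting at the first L1 table entry boundary after
 * the disk contents, so it never overlaps guest-visible data.
 */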
static int64_t qcow2_vm_state_offset(BDRVQcowState *s)
{
    return (int64_t)s->l1_vm_state_index << (s->cluster_bits + s->l2_bits);
}

static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQcowState *s = bs->opaque;
    bdi->cluster_size = s->cluster_size;
    bdi->vm_state_offset = qcow2_vm_state_offset(s);
    return 0;
}

#if 0
static void dump_refcounts(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int64_t nb_clusters, k, k1, size;
    int refcount;

    size = bdrv_getlength(bs->file);
    nb_clusters = size_to_clusters(s, size);
    for (k = 0; k < nb_clusters;) {
        k1 = k;
        refcount = get_refcount(bs, k);
        k++;
        while (k < nb_clusters && get_refcount(bs, k) == refcount)
            k++;
        printf("%" PRId64 ": refcount=%d nb=%" PRId64 "\n", k, refcount,
               k - k1);
    }
}
#endif

static int qcow2_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                              int64_t pos, int size)
{
    BDRVQcowState *s = bs->opaque;
    int growable = bs->growable;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE);
    bs->growable = 1;
    ret = bdrv_pwrite(bs, qcow2_vm_state_offset(s) + pos, buf, size);
    bs->growable = growable;

    return ret;
}

static int qcow2_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                              int64_t pos, int size)
{
    BDRVQcowState *s = bs->opaque;
    int growable = bs->growable;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
    bs->growable = 1;
    ret = bdrv_pread(bs, qcow2_vm_state_offset(s) + pos, buf, size);
    bs->growable = growable;

    return ret;
}

static QEMUOptionParameter qcow2_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size"
    },
    {
        .name = BLOCK_OPT_COMPAT_LEVEL,
        .type = OPT_STRING,
        .help = "Compatibility level (0.10 or 1.1)"
    },
    {
        .name = BLOCK_OPT_BACKING_FILE,
        .type = OPT_STRING,
        .help = "File name of a base image"
    },
    {
        .name = BLOCK_OPT_BACKING_FMT,
        .type = OPT_STRING,
        .help = "Image format of the base image"
    },
    {
        .name = BLOCK_OPT_ENCRYPT,
        .type = OPT_FLAG,
        .help = "Encrypt the image"
    },
    {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .type = OPT_SIZE,
        .help = "qcow2 cluster size",
        .value = { .n = DEFAULT_CLUSTER_SIZE },
    },
    {
        .name = BLOCK_OPT_PREALLOC,
        .type = OPT_STRING,
        .help = "Preallocation mode (allowed values: off, metadata)"
    },
    {
        .name = BLOCK_OPT_LAZY_REFCOUNTS,
        .type = OPT_FLAG,
        .help = "Postpone refcount updates",
    },
    { NULL }
};

static BlockDriver bdrv_qcow2 = {
    .format_name = "qcow2",
    .instance_size = sizeof(BDRVQcowState),
    .bdrv_probe = qcow2_probe,
    .bdrv_open = qcow2_open,
    .bdrv_close = qcow2_close,
    .bdrv_reopen_prepare = qcow2_reopen_prepare,
    .bdrv_create = qcow2_create,
    .bdrv_co_is_allocated = qcow2_co_is_allocated,
    .bdrv_set_key = qcow2_set_key,
    .bdrv_make_empty = qcow2_make_empty,

    .bdrv_co_readv = qcow2_co_readv,
    .bdrv_co_writev = qcow2_co_writev,
    .bdrv_co_flush_to_os = qcow2_co_flush_to_os,

    .bdrv_co_write_zeroes = qcow2_co_write_zeroes,
    .bdrv_co_discard = qcow2_co_discard,
    .bdrv_truncate = qcow2_truncate,
    .bdrv_write_compressed = qcow2_write_compressed,

    .bdrv_snapshot_create = qcow2_snapshot_create,
    .bdrv_snapshot_goto = qcow2_snapshot_goto,
    .bdrv_snapshot_delete = qcow2_snapshot_delete,
    .bdrv_snapshot_list = qcow2_snapshot_list,
    .bdrv_snapshot_load_tmp = qcow2_snapshot_load_tmp,
    .bdrv_get_info = qcow2_get_info,

    .bdrv_save_vmstate = qcow2_save_vmstate,
    .bdrv_load_vmstate = qcow2_load_vmstate,

    .bdrv_change_backing_file = qcow2_change_backing_file,

    .bdrv_invalidate_cache = qcow2_invalidate_cache,

    .create_options = qcow2_create_options,
    .bdrv_check = qcow2_check,
};

static void bdrv_qcow2_init(void)
{
    bdrv_register(&bdrv_qcow2);
}

block_init(bdrv_qcow2_init);