/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "block_int.h"
#include "module.h"
#include <zlib.h>
#include "aes.h"
#include "block/qcow2.h"
#include "qemu-error.h"
#include "qerror.h"
#include "trace.h"

/*
  Differences with QCOW:

  - Support for multiple incremental snapshots.
  - Memory management by reference counts.
  - Clusters which have a reference count of one have the bit
    QCOW_OFLAG_COPIED to optimize write performance.
  - Size of compressed clusters is stored in sectors to reduce bit usage
    in the cluster offsets.
  - Support for storing additional data (such as the VM state) in the
    snapshots.
  - If a backing store is used, the cluster size is not constrained
    (could be backported to QCOW).
  - L2 tables always have a size of one cluster.
*/


typedef struct {
    uint32_t magic;
    uint32_t len;
} QCowExtension;
#define QCOW2_EXT_MAGIC_END 0
#define QCOW2_EXT_MAGIC_BACKING_FORMAT 0xE2792ACA
#define QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857

static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    const QCowHeader *cow_header = (const void *)buf;

    if (buf_size >= sizeof(QCowHeader) &&
        be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
        be32_to_cpu(cow_header->version) >= 2)
        return 100;
    else
        return 0;
}


/*
 * read qcow2 extension and fill bs
 * start reading from start_offset
 * finish reading upon magic of value 0 or when end_offset reached
 * unknown magic is skipped (future extension this version knows nothing about)
 * return 0 upon success, non-0 otherwise
 */
static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
                                 uint64_t end_offset, void **p_feature_table)
{
    BDRVQcowState *s = bs->opaque;
    QCowExtension ext;
    uint64_t offset;
    int ret;

#ifdef DEBUG_EXT
    printf("qcow2_read_extensions: start=%ld end=%ld\n", start_offset, end_offset);
#endif
    offset = start_offset;
    while (offset < end_offset) {

#ifdef DEBUG_EXT
        /* Sanity check */
        if (offset > s->cluster_size)
            printf("qcow2_read_extension: suspicious offset %lu\n", offset);

        printf("attempting to read extended header in offset %lu\n", offset);
#endif

        if (bdrv_pread(bs->file, offset, &ext, sizeof(ext)) != sizeof(ext)) {
            fprintf(stderr, "qcow2_read_extension: ERROR: "
                    "pread fail from offset %" PRIu64 "\n",
                    offset);
            return 1;
        }
        be32_to_cpus(&ext.magic);
        be32_to_cpus(&ext.len);
        offset += sizeof(ext);
#ifdef DEBUG_EXT
        printf("ext.magic = 0x%x\n", ext.magic);
#endif
        if (ext.len > end_offset - offset) {
            error_report("Header extension too large");
            return -EINVAL;
        }

        switch (ext.magic) {
        case QCOW2_EXT_MAGIC_END:
            return 0;

        case QCOW2_EXT_MAGIC_BACKING_FORMAT:
            if (ext.len >= sizeof(bs->backing_format)) {
                fprintf(stderr, "ERROR: ext_backing_format: len=%u too large"
                        " (>=%zu)\n",
                        ext.len, sizeof(bs->backing_format));
                return 2;
            }
            if (bdrv_pread(bs->file, offset, bs->backing_format,
                           ext.len) != ext.len)
                return 3;
            bs->backing_format[ext.len] = '\0';
#ifdef DEBUG_EXT
            printf("Qcow2: Got format extension %s\n", bs->backing_format);
#endif
            break;

        case QCOW2_EXT_MAGIC_FEATURE_TABLE:
            if (p_feature_table != NULL) {
                void *feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
                ret = bdrv_pread(bs->file, offset, feature_table, ext.len);
                if (ret < 0) {
                    return ret;
                }

                *p_feature_table = feature_table;
            }
            break;

        default:
            /* unknown magic - save it in case we need to rewrite the header */
            {
                Qcow2UnknownHeaderExtension *uext;

                uext = g_malloc0(sizeof(*uext) + ext.len);
                uext->magic = ext.magic;
                uext->len = ext.len;
                QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next);

                ret = bdrv_pread(bs->file, offset, uext->data, uext->len);
                if (ret < 0) {
                    return ret;
                }
            }
            break;
        }

        offset += ((ext.len + 7) & ~7);
    }

    return 0;
}

static void cleanup_unknown_header_ext(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2UnknownHeaderExtension *uext, *next;

    QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) {
        QLIST_REMOVE(uext, next);
        g_free(uext);
    }
}

static void GCC_FMT_ATTR(2, 3) report_unsupported(BlockDriverState *bs,
    const char *fmt, ...)
{
    char msg[64];
    va_list ap;

    va_start(ap, fmt);
    vsnprintf(msg, sizeof(msg), fmt, ap);
    va_end(ap);

    qerror_report(QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
        bs->device_name, "qcow2", msg);
}

static void report_unsupported_feature(BlockDriverState *bs,
    Qcow2Feature *table, uint64_t mask)
{
    while (table && table->name[0] != '\0') {
        if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) {
            if (mask & (1 << table->bit)) {
                report_unsupported(bs, "%.46s", table->name);
                mask &= ~(1 << table->bit);
            }
        }
        table++;
    }

    if (mask) {
        report_unsupported(bs, "Unknown incompatible feature: %" PRIx64, mask);
    }
}

static int qcow2_open(BlockDriverState *bs, int flags)
{
    BDRVQcowState *s = bs->opaque;
    int len, i, ret = 0;
    QCowHeader header;
    uint64_t ext_end;

    ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
    if (ret < 0) {
        goto fail;
    }
    be32_to_cpus(&header.magic);
    be32_to_cpus(&header.version);
    be64_to_cpus(&header.backing_file_offset);
    be32_to_cpus(&header.backing_file_size);
    be64_to_cpus(&header.size);
    be32_to_cpus(&header.cluster_bits);
    be32_to_cpus(&header.crypt_method);
    be64_to_cpus(&header.l1_table_offset);
    be32_to_cpus(&header.l1_size);
    be64_to_cpus(&header.refcount_table_offset);
    be32_to_cpus(&header.refcount_table_clusters);
    be64_to_cpus(&header.snapshots_offset);
    be32_to_cpus(&header.nb_snapshots);

    if (header.magic != QCOW_MAGIC) {
        ret = -EINVAL;
        goto fail;
    }
    if (header.version < 2 || header.version > 3) {
        report_unsupported(bs, "QCOW version %d", header.version);
        ret = -ENOTSUP;
        goto fail;
    }

    s->qcow_version = header.version;

    /* Initialise version 3 header fields */
    if (header.version == 2) {
        header.incompatible_features = 0;
        header.compatible_features = 0;
        header.autoclear_features = 0;
        header.refcount_order = 4;
        header.header_length = 72;
    } else {
        be64_to_cpus(&header.incompatible_features);
        be64_to_cpus(&header.compatible_features);
        be64_to_cpus(&header.autoclear_features);
        be32_to_cpus(&header.refcount_order);
        be32_to_cpus(&header.header_length);
    }

    if (header.header_length > sizeof(header)) {
        s->unknown_header_fields_size = header.header_length - sizeof(header);
        s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);
        ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields,
                         s->unknown_header_fields_size);
        if (ret < 0) {
            goto fail;
        }
    }

    if (header.backing_file_offset) {
        ext_end = header.backing_file_offset;
    } else {
        ext_end = 1 << header.cluster_bits;
    }

    /* Handle feature bits */
    s->incompatible_features = header.incompatible_features;
    s->compatible_features = header.compatible_features;
    s->autoclear_features = header.autoclear_features;

    if (s->incompatible_features != 0) {
        void *feature_table = NULL;
        qcow2_read_extensions(bs, header.header_length, ext_end,
                              &feature_table);
        report_unsupported_feature(bs, feature_table,
                                   s->incompatible_features);
        ret = -ENOTSUP;
        goto fail;
    }

    /* Check support for various header values */
    if (header.refcount_order != 4) {
        report_unsupported(bs, "%d bit reference counts",
                           1 << header.refcount_order);
        ret = -ENOTSUP;
        goto fail;
    }

    if (header.cluster_bits < MIN_CLUSTER_BITS ||
        header.cluster_bits > MAX_CLUSTER_BITS) {
        ret = -EINVAL;
        goto fail;
    }
    if (header.crypt_method > QCOW_CRYPT_AES) {
        ret = -EINVAL;
        goto fail;
    }
    s->crypt_method_header = header.crypt_method;
    if (s->crypt_method_header) {
        bs->encrypted = 1;
    }
    s->cluster_bits = header.cluster_bits;
    s->cluster_size = 1 << s->cluster_bits;
    s->cluster_sectors = 1 << (s->cluster_bits - 9);
    s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
    s->l2_size = 1 << s->l2_bits;
    bs->total_sectors = header.size / 512;
    s->csize_shift = (62 - (s->cluster_bits - 8));
    s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
    s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
    s->refcount_table_offset = header.refcount_table_offset;
    s->refcount_table_size =
        header.refcount_table_clusters << (s->cluster_bits - 3);

    s->snapshots_offset = header.snapshots_offset;
    s->nb_snapshots = header.nb_snapshots;

    /* read the level 1 table */
    s->l1_size = header.l1_size;
    s->l1_vm_state_index = size_to_l1(s, header.size);
    /* the L1 table must contain at least enough entries to put
       header.size bytes */
    if (s->l1_size < s->l1_vm_state_index) {
        ret = -EINVAL;
        goto fail;
    }
    s->l1_table_offset = header.l1_table_offset;
    if (s->l1_size > 0) {
        s->l1_table = g_malloc0(
            align_offset(s->l1_size * sizeof(uint64_t), 512));
        ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
                         s->l1_size * sizeof(uint64_t));
        if (ret < 0) {
            goto fail;
        }
        for (i = 0; i < s->l1_size; i++) {
            be64_to_cpus(&s->l1_table[i]);
        }
    }

    /* alloc L2 table/refcount block cache */
    s->l2_table_cache = qcow2_cache_create(bs, L2_CACHE_SIZE);
    s->refcount_block_cache = qcow2_cache_create(bs, REFCOUNT_CACHE_SIZE);

    s->cluster_cache = g_malloc(s->cluster_size);
    /* one more sector for decompressed data alignment */
    s->cluster_data = qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
                                      + 512);
    s->cluster_cache_offset = -1;
    s->flags = flags;

    ret = qcow2_refcount_init(bs);
    if (ret != 0) {
        goto fail;
    }

    QLIST_INIT(&s->cluster_allocs);

    /* read qcow2 extensions */
    if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL)) {
        ret = -EINVAL;
        goto fail;
    }

    /* read the backing file name */
    if (header.backing_file_offset != 0) {
        len = header.backing_file_size;
        if (len > 1023) {
            len = 1023;
        }
        ret = bdrv_pread(bs->file, header.backing_file_offset,
                         bs->backing_file, len);
        if (ret < 0) {
            goto fail;
        }
        bs->backing_file[len] = '\0';
    }

    ret = qcow2_read_snapshots(bs);
    if (ret < 0) {
        goto fail;
    }

    /* Clear unknown autoclear feature bits */
    if (!bs->read_only && s->autoclear_features != 0) {
        s->autoclear_features = 0;
        ret = qcow2_update_header(bs);
        if (ret < 0) {
            goto fail;
        }
    }

    /* Initialise locks */
    qemu_co_mutex_init(&s->lock);

#ifdef DEBUG_ALLOC
    {
        BdrvCheckResult result = {0};
        qcow2_check_refcounts(bs, &result, 0);
    }
#endif
    return ret;

fail:
    g_free(s->unknown_header_fields);
    cleanup_unknown_header_ext(bs);
    qcow2_free_snapshots(bs);
    qcow2_refcount_close(bs);
    g_free(s->l1_table);
    if (s->l2_table_cache) {
        qcow2_cache_destroy(bs, s->l2_table_cache);
    }
    g_free(s->cluster_cache);
    qemu_vfree(s->cluster_data);
    return ret;
}

static int qcow2_set_key(BlockDriverState *bs, const char *key)
{
    BDRVQcowState *s = bs->opaque;
    uint8_t keybuf[16];
    int len, i;

    memset(keybuf, 0, 16);
    len = strlen(key);
    if (len > 16)
        len = 16;
    /* XXX: we could compress the chars to 7 bits to increase
       entropy */
    for (i = 0; i < len; i++) {
        keybuf[i] = key[i];
    }
    s->crypt_method = s->crypt_method_header;

    if (AES_set_encrypt_key(keybuf, 128, &s->aes_encrypt_key) != 0)
        return -1;
    if (AES_set_decrypt_key(keybuf, 128, &s->aes_decrypt_key) != 0)
        return -1;
#if 0
    /* test */
    {
        uint8_t in[16];
        uint8_t out[16];
        uint8_t tmp[16];
        for (i = 0; i < 16; i++)
            in[i] = i;
        AES_encrypt(in, tmp, &s->aes_encrypt_key);
        AES_decrypt(tmp, out, &s->aes_decrypt_key);
        for (i = 0; i < 16; i++)
            printf(" %02x", tmp[i]);
        printf("\n");
        for (i = 0; i < 16; i++)
            printf(" %02x", out[i]);
        printf("\n");
    }
#endif
    return 0;
}

static int coroutine_fn qcow2_co_is_allocated(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, int *pnum)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t cluster_offset;
    int ret;

    *pnum = nb_sectors;
    /* FIXME We can get errors here, but the bdrv_co_is_allocated interface
     * can't pass them on today */
    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_get_cluster_offset(bs, sector_num << 9, pnum, &cluster_offset);
    qemu_co_mutex_unlock(&s->lock);
    if (ret < 0) {
        *pnum = 0;
    }

    return (cluster_offset != 0);
}

/* handle reading after the end of the backing file */
int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov,
                        int64_t sector_num, int nb_sectors)
{
    int n1;
    if ((sector_num + nb_sectors) <= bs->total_sectors)
        return nb_sectors;
    if (sector_num >= bs->total_sectors)
        n1 = 0;
    else
        n1 = bs->total_sectors - sector_num;

    qemu_iovec_memset_skip(qiov, 0, 512 * (nb_sectors - n1), 512 * n1);

    return n1;
}

static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
                                       int remaining_sectors, QEMUIOVector *qiov)
{
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster, n1;
    int ret;
    int cur_nr_sectors; /* number of sectors in current iteration */
    uint64_t cluster_offset = 0;
    uint64_t bytes_done = 0;
    QEMUIOVector hd_qiov;
    uint8_t *cluster_data = NULL;

    qemu_iovec_init(&hd_qiov, qiov->niov);

    qemu_co_mutex_lock(&s->lock);

    while (remaining_sectors != 0) {

        /* prepare next request */
        cur_nr_sectors = remaining_sectors;
        if (s->crypt_method) {
            cur_nr_sectors = MIN(cur_nr_sectors,
                QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors);
        }

        ret = qcow2_get_cluster_offset(bs, sector_num << 9,
            &cur_nr_sectors, &cluster_offset);
        if (ret < 0) {
            goto fail;
        }

        index_in_cluster = sector_num & (s->cluster_sectors - 1);

        qemu_iovec_reset(&hd_qiov);
        qemu_iovec_copy(&hd_qiov, qiov, bytes_done,
            cur_nr_sectors * 512);

        switch (ret) {
        case QCOW2_CLUSTER_UNALLOCATED:

            if (bs->backing_hd) {
                /* read from the base image */
                n1 = qcow2_backing_read1(bs->backing_hd, &hd_qiov,
                    sector_num, cur_nr_sectors);
                if (n1 > 0) {
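                    /* The first n1 sectors still lie within the backing file;
                     * anything beyond its end has already been zeroed by
                     * qcow2_backing_read1() above. */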
                    BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
                    qemu_co_mutex_unlock(&s->lock);
                    ret = bdrv_co_readv(bs->backing_hd, sector_num,
                                        n1, &hd_qiov);
                    qemu_co_mutex_lock(&s->lock);
                    if (ret < 0) {
                        goto fail;
                    }
                }
            } else {
                /* Note: in this case, no need to wait */
                qemu_iovec_memset(&hd_qiov, 0, 512 * cur_nr_sectors);
            }
            break;

        case QCOW2_CLUSTER_ZERO:
            if (s->qcow_version < 3) {
                ret = -EIO;
                goto fail;
            }
            qemu_iovec_memset(&hd_qiov, 0, 512 * cur_nr_sectors);
            break;

        case QCOW2_CLUSTER_COMPRESSED:
            /* add AIO support for compressed blocks ? */
            ret = qcow2_decompress_cluster(bs, cluster_offset);
            if (ret < 0) {
                goto fail;
            }

            qemu_iovec_from_buffer(&hd_qiov,
                s->cluster_cache + index_in_cluster * 512,
                512 * cur_nr_sectors);
            break;

        case QCOW2_CLUSTER_NORMAL:
            if ((cluster_offset & 511) != 0) {
                ret = -EIO;
                goto fail;
            }

            if (s->crypt_method) {
                /*
                 * For encrypted images, read everything into a temporary
                 * contiguous buffer on which the AES functions can work.
                 */
                if (!cluster_data) {
                    cluster_data =
                        qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
                }

                assert(cur_nr_sectors <=
                    QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors);
                qemu_iovec_reset(&hd_qiov);
                qemu_iovec_add(&hd_qiov, cluster_data,
                    512 * cur_nr_sectors);
            }

            BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
            qemu_co_mutex_unlock(&s->lock);
            ret = bdrv_co_readv(bs->file,
                                (cluster_offset >> 9) + index_in_cluster,
                                cur_nr_sectors, &hd_qiov);
            qemu_co_mutex_lock(&s->lock);
            if (ret < 0) {
                goto fail;
            }
            if (s->crypt_method) {
                qcow2_encrypt_sectors(s, sector_num, cluster_data,
                    cluster_data, cur_nr_sectors, 0, &s->aes_decrypt_key);
                qemu_iovec_reset(&hd_qiov);
                qemu_iovec_copy(&hd_qiov, qiov, bytes_done,
                    cur_nr_sectors * 512);
                qemu_iovec_from_buffer(&hd_qiov, cluster_data,
                    512 * cur_nr_sectors);
            }
            break;

        default:
            g_assert_not_reached();
            ret = -EIO;
            goto fail;
        }

        remaining_sectors -= cur_nr_sectors;
        sector_num += cur_nr_sectors;
        bytes_done += cur_nr_sectors * 512;
    }
    ret = 0;

fail:
    qemu_co_mutex_unlock(&s->lock);

    qemu_iovec_destroy(&hd_qiov);
    qemu_vfree(cluster_data);

    return ret;
}

static void run_dependent_requests(BDRVQcowState *s, QCowL2Meta *m)
{
    /* Take the request off the list of running requests */
    if (m->nb_clusters != 0) {
        QLIST_REMOVE(m, next_in_flight);
    }

    /* Restart all dependent requests */
    if (!qemu_co_queue_empty(&m->dependent_requests)) {
        qemu_co_mutex_unlock(&s->lock);
        qemu_co_queue_restart_all(&m->dependent_requests);
        qemu_co_mutex_lock(&s->lock);
    }
}

static coroutine_fn int qcow2_co_writev(BlockDriverState *bs,
                                        int64_t sector_num,
                                        int remaining_sectors,
                                        QEMUIOVector *qiov)
{
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster;
    int n_end;
    int ret;
    int cur_nr_sectors; /* number of sectors in current iteration */
    uint64_t cluster_offset;
    QEMUIOVector hd_qiov;
    uint64_t bytes_done = 0;
    uint8_t *cluster_data = NULL;
    QCowL2Meta l2meta = {
        .nb_clusters = 0,
    };

    trace_qcow2_writev_start_req(qemu_coroutine_self(), sector_num,
                                 remaining_sectors);

    qemu_co_queue_init(&l2meta.dependent_requests);

    qemu_iovec_init(&hd_qiov, qiov->niov);

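    /* This write may make the cached decompressed cluster stale, so drop the
     * compressed read cache before doing any work. */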
    s->cluster_cache_offset = -1; /* disable compressed cache */

    qemu_co_mutex_lock(&s->lock);

    while (remaining_sectors != 0) {

        trace_qcow2_writev_start_part(qemu_coroutine_self());
        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        n_end = index_in_cluster + remaining_sectors;
        if (s->crypt_method &&
            n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors) {
            n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
        }

        ret = qcow2_alloc_cluster_offset(bs, sector_num << 9,
            index_in_cluster, n_end, &cur_nr_sectors, &l2meta);
        if (ret < 0) {
            goto fail;
        }

        cluster_offset = l2meta.cluster_offset;
        assert((cluster_offset & 511) == 0);

        qemu_iovec_reset(&hd_qiov);
        qemu_iovec_copy(&hd_qiov, qiov, bytes_done,
            cur_nr_sectors * 512);

        if (s->crypt_method) {
            if (!cluster_data) {
                cluster_data = qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS *
                                               s->cluster_size);
            }

            assert(hd_qiov.size <=
                   QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
            qemu_iovec_to_buffer(&hd_qiov, cluster_data);

            qcow2_encrypt_sectors(s, sector_num, cluster_data,
                cluster_data, cur_nr_sectors, 1, &s->aes_encrypt_key);

            qemu_iovec_reset(&hd_qiov);
            qemu_iovec_add(&hd_qiov, cluster_data,
                cur_nr_sectors * 512);
        }

        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        qemu_co_mutex_unlock(&s->lock);
        trace_qcow2_writev_data(qemu_coroutine_self(),
                                (cluster_offset >> 9) + index_in_cluster);
        ret = bdrv_co_writev(bs->file,
                             (cluster_offset >> 9) + index_in_cluster,
                             cur_nr_sectors, &hd_qiov);
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_alloc_cluster_link_l2(bs, &l2meta);
        if (ret < 0) {
            goto fail;
        }

        run_dependent_requests(s, &l2meta);

        remaining_sectors -= cur_nr_sectors;
        sector_num += cur_nr_sectors;
        bytes_done += cur_nr_sectors * 512;
        trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_nr_sectors);
    }
    ret = 0;

fail:
    run_dependent_requests(s, &l2meta);

    qemu_co_mutex_unlock(&s->lock);

    qemu_iovec_destroy(&hd_qiov);
    qemu_vfree(cluster_data);
    trace_qcow2_writev_done_req(qemu_coroutine_self(), ret);

    return ret;
}

static void qcow2_close(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    g_free(s->l1_table);

    qcow2_cache_flush(bs, s->l2_table_cache);
    qcow2_cache_flush(bs, s->refcount_block_cache);

    qcow2_cache_destroy(bs, s->l2_table_cache);
    qcow2_cache_destroy(bs, s->refcount_block_cache);

    g_free(s->unknown_header_fields);
    cleanup_unknown_header_ext(bs);

    g_free(s->cluster_cache);
    qemu_vfree(s->cluster_data);
    qcow2_refcount_close(bs);
    qcow2_free_snapshots(bs);
}

static void qcow2_invalidate_cache(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int flags = s->flags;
    AES_KEY aes_encrypt_key;
    AES_KEY aes_decrypt_key;
    uint32_t crypt_method = 0;

    /*
     * Backing files are read-only, which makes all of their metadata immutable,
     * so we don't have to worry about reopening them here.
     */

    if (s->crypt_method) {
        crypt_method = s->crypt_method;
        memcpy(&aes_encrypt_key, &s->aes_encrypt_key, sizeof(aes_encrypt_key));
        memcpy(&aes_decrypt_key, &s->aes_decrypt_key, sizeof(aes_decrypt_key));
    }

    qcow2_close(bs);

    memset(s, 0, sizeof(BDRVQcowState));
    qcow2_open(bs, flags);

    if (crypt_method) {
        s->crypt_method = crypt_method;
        memcpy(&s->aes_encrypt_key, &aes_encrypt_key, sizeof(aes_encrypt_key));
        memcpy(&s->aes_decrypt_key, &aes_decrypt_key, sizeof(aes_decrypt_key));
    }
}

static size_t header_ext_add(char *buf, uint32_t magic, const void *s,
    size_t len, size_t buflen)
{
    QCowExtension *ext_backing_fmt = (QCowExtension*) buf;
    size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7);

    if (buflen < ext_len) {
        return -ENOSPC;
    }

    *ext_backing_fmt = (QCowExtension) {
        .magic = cpu_to_be32(magic),
        .len = cpu_to_be32(len),
    };
    memcpy(buf + sizeof(QCowExtension), s, len);

    return ext_len;
}

/*
 * Updates the qcow2 header, including the variable length parts of it, i.e.
 * the backing file name and all extensions. qcow2 was not designed to allow
 * such changes, so if we run out of space (we can only use the first cluster)
 * this function may fail.
 *
 * Returns 0 on success, -errno in error cases.
 */
int qcow2_update_header(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    QCowHeader *header;
    char *buf;
    size_t buflen = s->cluster_size;
    int ret;
    uint64_t total_size;
    uint32_t refcount_table_clusters;
    size_t header_length;
    Qcow2UnknownHeaderExtension *uext;

    buf = qemu_blockalign(bs, buflen);

    /* Header structure */
    header = (QCowHeader*) buf;

    if (buflen < sizeof(*header)) {
        ret = -ENOSPC;
        goto fail;
    }

    header_length = sizeof(*header) + s->unknown_header_fields_size;
    total_size = bs->total_sectors * BDRV_SECTOR_SIZE;
    refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3);

    *header = (QCowHeader) {
        /* Version 2 fields */
        .magic = cpu_to_be32(QCOW_MAGIC),
        .version = cpu_to_be32(s->qcow_version),
        .backing_file_offset = 0,
        .backing_file_size = 0,
        .cluster_bits = cpu_to_be32(s->cluster_bits),
        .size = cpu_to_be64(total_size),
        .crypt_method = cpu_to_be32(s->crypt_method_header),
        .l1_size = cpu_to_be32(s->l1_size),
        .l1_table_offset = cpu_to_be64(s->l1_table_offset),
        .refcount_table_offset = cpu_to_be64(s->refcount_table_offset),
        .refcount_table_clusters = cpu_to_be32(refcount_table_clusters),
        .nb_snapshots = cpu_to_be32(s->nb_snapshots),
        .snapshots_offset = cpu_to_be64(s->snapshots_offset),

        /* Version 3 fields */
        .incompatible_features = cpu_to_be64(s->incompatible_features),
        .compatible_features = cpu_to_be64(s->compatible_features),
        .autoclear_features = cpu_to_be64(s->autoclear_features),
        .refcount_order = cpu_to_be32(3 + REFCOUNT_SHIFT),
        .header_length = cpu_to_be32(header_length),
    };

    /* For older versions, write a shorter header */
    switch (s->qcow_version) {
    case 2:
        ret = offsetof(QCowHeader, incompatible_features);
        break;
    case 3:
        ret = sizeof(*header);
        break;
    default:
        ret = -EINVAL;
        goto fail;
    }

    buf += ret;
    buflen -= ret;
    memset(buf, 0, buflen);

    /* Preserve any unknown field in the header */
    if (s->unknown_header_fields_size) {
        if (buflen < s->unknown_header_fields_size) {
            ret = -ENOSPC;
            goto fail;
        }

        memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size);
        buf += s->unknown_header_fields_size;
        buflen -= s->unknown_header_fields_size;
    }

    /* Backing file format header extension */
    if (*bs->backing_format) {
        ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT,
                             bs->backing_format, strlen(bs->backing_format),
                             buflen);
        if (ret < 0) {
            goto fail;
        }

        buf += ret;
        buflen -= ret;
    }

    /* Feature table */
    Qcow2Feature features[] = {
        /* no feature defined yet */
    };

    ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE,
                         features, sizeof(features), buflen);
    if (ret < 0) {
        goto fail;
    }
    buf += ret;
    buflen -= ret;

    /* Keep unknown header extensions */
    QLIST_FOREACH(uext, &s->unknown_header_ext, next) {
        ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen);
        if (ret < 0) {
            goto fail;
        }

        buf += ret;
        buflen -= ret;
    }

    /* End of header extensions */
    ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen);
    if (ret < 0) {
        goto fail;
    }

    buf += ret;
    buflen -= ret;

    /* Backing file name */
    if (*bs->backing_file) {
        size_t backing_file_len = strlen(bs->backing_file);

        if (buflen < backing_file_len) {
            ret = -ENOSPC;
            goto fail;
        }

        strncpy(buf, bs->backing_file, buflen);

        header->backing_file_offset = cpu_to_be64(buf - ((char*) header));
        header->backing_file_size = cpu_to_be32(backing_file_len);
    }

    /* Write the new header */
    ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size);
    if (ret < 0) {
        goto fail;
    }

    ret = 0;
fail:
    qemu_vfree(header);
    return ret;
}

static int qcow2_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
    pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");

    return qcow2_update_header(bs);
}

static int preallocate(BlockDriverState *bs)
{
    uint64_t nb_sectors;
    uint64_t offset;
    int num;
    int ret;
    QCowL2Meta meta;

    nb_sectors = bdrv_getlength(bs) >> 9;
    offset = 0;
    qemu_co_queue_init(&meta.dependent_requests);
    meta.cluster_offset = 0;

    while (nb_sectors) {
        num = MIN(nb_sectors, INT_MAX >> 9);
        ret = qcow2_alloc_cluster_offset(bs, offset, 0, num, &num, &meta);
        if (ret < 0) {
            return ret;
        }

        ret = qcow2_alloc_cluster_link_l2(bs, &meta);
        if (ret < 0) {
            qcow2_free_any_clusters(bs, meta.cluster_offset, meta.nb_clusters);
            return ret;
        }

        /* There are no dependent requests, but we need to remove our request
         * from the list of in-flight requests */
        run_dependent_requests(bs->opaque, &meta);

        /* TODO Preallocate data if requested */

        nb_sectors -= num;
        offset += num << 9;
    }

    /*
     * It is expected that the image file is large enough to actually contain
     * all of the allocated clusters (otherwise we get failing reads after
     * EOF). Extend the image to the last allocated sector.
     */
    if (meta.cluster_offset != 0) {
        uint8_t buf[512];
        memset(buf, 0, 512);
        ret = bdrv_write(bs->file, (meta.cluster_offset >> 9) + num - 1, buf, 1);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

static int qcow2_create2(const char *filename, int64_t total_size,
                         const char *backing_file, const char *backing_format,
                         int flags, size_t cluster_size, int prealloc,
                         QEMUOptionParameter *options, int version)
{
    /* Calculate cluster_bits */
    int cluster_bits;
    cluster_bits = ffs(cluster_size) - 1;
    if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS ||
        (1 << cluster_bits) != cluster_size)
    {
        error_report(
            "Cluster size must be a power of two between %d and %dk",
            1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10));
        return -EINVAL;
    }

    /*
     * Open the image file and write a minimal qcow2 header.
     *
     * We keep things simple and start with a zero-sized image. We also
     * do without refcount blocks or an L1 table for now. We'll fix the
     * inconsistency later.
     *
     * We do need a refcount table because growing the refcount table means
     * allocating two new refcount blocks - the second of which would be at
     * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file
     * size for any qcow2 image.
     */
    BlockDriverState* bs;
    QCowHeader header;
    uint8_t* refcount_table;
    int ret;

    ret = bdrv_create_file(filename, options);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_file_open(&bs, filename, BDRV_O_RDWR);
    if (ret < 0) {
        return ret;
    }

    /* Write the header */
    memset(&header, 0, sizeof(header));
    header.magic = cpu_to_be32(QCOW_MAGIC);
    header.version = cpu_to_be32(version);
    header.cluster_bits = cpu_to_be32(cluster_bits);
    header.size = cpu_to_be64(0);
    header.l1_table_offset = cpu_to_be64(0);
    header.l1_size = cpu_to_be32(0);
    header.refcount_table_offset = cpu_to_be64(cluster_size);
    header.refcount_table_clusters = cpu_to_be32(1);
    header.refcount_order = cpu_to_be32(3 + REFCOUNT_SHIFT);
    header.header_length = cpu_to_be32(sizeof(header));

    if (flags & BLOCK_FLAG_ENCRYPT) {
        header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES);
    } else {
        header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
    }

    ret = bdrv_pwrite(bs, 0, &header, sizeof(header));
    if (ret < 0) {
        goto out;
    }

    /* Write an empty refcount table */
    refcount_table = g_malloc0(cluster_size);
    ret = bdrv_pwrite(bs, cluster_size, refcount_table, cluster_size);
    g_free(refcount_table);

    if (ret < 0) {
        goto out;
    }

    bdrv_close(bs);

    /*
     * And now open the image and make it consistent first (i.e. increase the
     * refcount of the cluster that is occupied by the header and the refcount
     * table)
     */
    BlockDriver* drv = bdrv_find_format("qcow2");
    assert(drv != NULL);
    ret = bdrv_open(bs, filename,
        BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH, drv);
    if (ret < 0) {
        goto out;
    }

    ret = qcow2_alloc_clusters(bs, 2 * cluster_size);
    if (ret < 0) {
        goto out;

    } else if (ret != 0) {
        error_report("Huh, first cluster in empty image is already in use?");
        abort();
    }

    /* Okay, now that we have a valid image, let's give it the right size */
    ret = bdrv_truncate(bs, total_size * BDRV_SECTOR_SIZE);
    if (ret < 0) {
        goto out;
    }

    /* Want a backing file? There you go. */
    if (backing_file) {
        ret = bdrv_change_backing_file(bs, backing_file, backing_format);
        if (ret < 0) {
            goto out;
        }
    }

    /* And if we're supposed to preallocate metadata, do that now */
    if (prealloc) {
        BDRVQcowState *s = bs->opaque;
        qemu_co_mutex_lock(&s->lock);
        ret = preallocate(bs);
        qemu_co_mutex_unlock(&s->lock);
        if (ret < 0) {
            goto out;
        }
    }

    ret = 0;
out:
    bdrv_delete(bs);
    return ret;
}

static int qcow2_create(const char *filename, QEMUOptionParameter *options)
{
    const char *backing_file = NULL;
    const char *backing_fmt = NULL;
    uint64_t sectors = 0;
    int flags = 0;
    size_t cluster_size = DEFAULT_CLUSTER_SIZE;
    int prealloc = 0;
    int version = 2;

    /* Read out options */
    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            sectors = options->value.n / 512;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
            backing_fmt = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_ENCRYPT)) {
            flags |= options->value.n ? BLOCK_FLAG_ENCRYPT : 0;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                cluster_size = options->value.n;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_PREALLOC)) {
            if (!options->value.s || !strcmp(options->value.s, "off")) {
                prealloc = 0;
            } else if (!strcmp(options->value.s, "metadata")) {
                prealloc = 1;
            } else {
                fprintf(stderr, "Invalid preallocation mode: '%s'\n",
                    options->value.s);
                return -EINVAL;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_COMPAT_LEVEL)) {
            if (!options->value.s || !strcmp(options->value.s, "0.10")) {
                version = 2;
            } else if (!strcmp(options->value.s, "1.1")) {
                version = 3;
            } else {
                fprintf(stderr, "Invalid compatibility level: '%s'\n",
                    options->value.s);
                return -EINVAL;
            }
        }
        options++;
    }

    if (backing_file && prealloc) {
        fprintf(stderr, "Backing file and preallocation cannot be used at "
            "the same time\n");
        return -EINVAL;
    }

    return qcow2_create2(filename, sectors, backing_file, backing_fmt, flags,
                         cluster_size, prealloc, options, version);
}

static int qcow2_make_empty(BlockDriverState *bs)
{
#if 0
    /* XXX: not correct */
    BDRVQcowState *s = bs->opaque;
    uint32_t l1_length = s->l1_size * sizeof(uint64_t);
    int ret;

    memset(s->l1_table, 0, l1_length);
    if (bdrv_pwrite(bs->file, s->l1_table_offset, s->l1_table, l1_length) < 0)
        return -1;
    ret = bdrv_truncate(bs->file, s->l1_table_offset + l1_length);
    if (ret < 0)
        return ret;

    l2_cache_reset(bs);
#endif
    return 0;
}

static coroutine_fn int qcow2_co_write_zeroes(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors)
{
    int ret;
    BDRVQcowState *s = bs->opaque;

    /* Emulate misaligned zero writes */
    if (sector_num % s->cluster_sectors || nb_sectors % s->cluster_sectors) {
        return -ENOTSUP;
    }

    /* Whatever is left can use real zero clusters */
    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_zero_clusters(bs, sector_num << BDRV_SECTOR_BITS,
        nb_sectors);
    qemu_co_mutex_unlock(&s->lock);

    return ret;
}

static coroutine_fn int qcow2_co_discard(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors)
{
    int ret;
    BDRVQcowState *s = bs->opaque;

    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_discard_clusters(bs, sector_num << BDRV_SECTOR_BITS,
        nb_sectors);
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}

static int qcow2_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, new_l1_size;

    if (offset & 511) {
        error_report("The new size must be a multiple of 512");
        return -EINVAL;
    }

    /* cannot proceed if image has snapshots */
    if (s->nb_snapshots) {
        error_report("Can't resize an image which has snapshots");
        return -ENOTSUP;
    }

    /* shrinking is currently not supported */
    if (offset < bs->total_sectors * 512) {
        error_report("qcow2 doesn't support shrinking images yet");
        return -ENOTSUP;
    }

    new_l1_size = size_to_l1(s, offset);
    ret = qcow2_grow_l1_table(bs, new_l1_size, true);
    if (ret < 0) {
        return ret;
    }

    /* write updated header.size */
    offset = cpu_to_be64(offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size),
                           &offset, sizeof(uint64_t));
    if (ret < 0) {
        return ret;
    }

    s->l1_vm_state_index = new_l1_size;
    return 0;
}

/* XXX: put compressed sectors first, then all the cluster aligned
   tables to avoid losing bytes in alignment */
static int qcow2_write_compressed(BlockDriverState *bs, int64_t sector_num,
                                  const uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    z_stream strm;
    int ret, out_len;
    uint8_t *out_buf;
    uint64_t cluster_offset;

    if (nb_sectors == 0) {
        /* align end of file to a sector boundary to ease reading with
           sector based I/Os */
        cluster_offset = bdrv_getlength(bs->file);
        cluster_offset = (cluster_offset + 511) & ~511;
        bdrv_truncate(bs->file, cluster_offset);
        return 0;
    }

    if (nb_sectors != s->cluster_sectors)
        return -EINVAL;

    out_buf = g_malloc(s->cluster_size + (s->cluster_size / 1000) + 128);

    /* best compression, small window, no zlib header */
    memset(&strm, 0, sizeof(strm));
    ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION,
                       Z_DEFLATED, -12,
                       9, Z_DEFAULT_STRATEGY);
    if (ret != 0) {
        ret = -EINVAL;
        goto fail;
    }

    strm.avail_in = s->cluster_size;
    strm.next_in = (uint8_t *)buf;
    strm.avail_out = s->cluster_size;
    strm.next_out = out_buf;

    ret = deflate(&strm, Z_FINISH);
    if (ret != Z_STREAM_END && ret != Z_OK) {
        deflateEnd(&strm);
        ret = -EINVAL;
        goto fail;
    }
    out_len = strm.next_out - out_buf;

    deflateEnd(&strm);

    if (ret != Z_STREAM_END || out_len >= s->cluster_size) {
        /* could not compress: write normal cluster */
        ret = bdrv_write(bs, sector_num, buf, s->cluster_sectors);
        if (ret < 0) {
            goto fail;
        }
    } else {
        cluster_offset = qcow2_alloc_compressed_cluster_offset(bs,
            sector_num << 9, out_len);
        if (!cluster_offset) {
            ret = -EIO;
            goto fail;
        }
        cluster_offset &= s->cluster_offset_mask;
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED);
        ret = bdrv_pwrite(bs->file, cluster_offset, out_buf, out_len);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;
fail:
    g_free(out_buf);
    return ret;
}

static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        qemu_co_mutex_unlock(&s->lock);
        return ret;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        qemu_co_mutex_unlock(&s->lock);
        return ret;
    }
    qemu_co_mutex_unlock(&s->lock);

    return 0;
}

static int64_t qcow2_vm_state_offset(BDRVQcowState *s)
{
    return (int64_t)s->l1_vm_state_index << (s->cluster_bits + s->l2_bits);
}

static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQcowState *s = bs->opaque;
    bdi->cluster_size = s->cluster_size;
    bdi->vm_state_offset = qcow2_vm_state_offset(s);
    return 0;
}


static int qcow2_check(BlockDriverState *bs, BdrvCheckResult *result,
                       BdrvCheckMode fix)
{
    return qcow2_check_refcounts(bs, result, fix);
}

#if 0
static void dump_refcounts(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int64_t nb_clusters, k, k1, size;
    int refcount;

    size = bdrv_getlength(bs->file);
    nb_clusters = size_to_clusters(s, size);
    for (k = 0; k < nb_clusters;) {
        k1 = k;
        refcount = get_refcount(bs, k);
        k++;
        while (k < nb_clusters && get_refcount(bs, k) == refcount)
            k++;
        printf("%" PRId64 ": refcount=%d nb=%" PRId64 "\n", k, refcount,
               k - k1);
    }
}
#endif

static int qcow2_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                              int64_t pos, int size)
{
    BDRVQcowState *s = bs->opaque;
    int growable = bs->growable;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE);
    bs->growable = 1;
    ret = bdrv_pwrite(bs, qcow2_vm_state_offset(s) + pos, buf, size);
    bs->growable = growable;

    return ret;
}

static int qcow2_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                              int64_t pos, int size)
{
    BDRVQcowState *s = bs->opaque;
    int growable = bs->growable;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
    bs->growable = 1;
    ret = bdrv_pread(bs, qcow2_vm_state_offset(s) + pos, buf, size);
    bs->growable = growable;

    return ret;
}

static QEMUOptionParameter qcow2_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size"
    },
    {
        .name = BLOCK_OPT_COMPAT_LEVEL,
        .type = OPT_STRING,
        .help = "Compatibility level (0.10 or 1.1)"
    },
    {
        .name = BLOCK_OPT_BACKING_FILE,
        .type = OPT_STRING,
        .help = "File name of a base image"
    },
    {
        .name = BLOCK_OPT_BACKING_FMT,
        .type = OPT_STRING,
        .help = "Image format of the base image"
    },
    {
        .name = BLOCK_OPT_ENCRYPT,
        .type = OPT_FLAG,
        .help = "Encrypt the image"
    },
    {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .type = OPT_SIZE,
        .help = "qcow2 cluster size",
        .value = { .n = DEFAULT_CLUSTER_SIZE },
    },
    {
        .name = BLOCK_OPT_PREALLOC,
        .type = OPT_STRING,
        .help = "Preallocation mode (allowed values: off, metadata)"
    },
    { NULL }
};

static BlockDriver bdrv_qcow2 = {
    .format_name = "qcow2",
    .instance_size = sizeof(BDRVQcowState),
    .bdrv_probe = qcow2_probe,
    .bdrv_open = qcow2_open,
    .bdrv_close = qcow2_close,
    .bdrv_create = qcow2_create,
    .bdrv_co_is_allocated = qcow2_co_is_allocated,
    .bdrv_set_key = qcow2_set_key,
    .bdrv_make_empty = qcow2_make_empty,

    .bdrv_co_readv = qcow2_co_readv,
    .bdrv_co_writev = qcow2_co_writev,
    .bdrv_co_flush_to_os = qcow2_co_flush_to_os,

    .bdrv_co_write_zeroes = qcow2_co_write_zeroes,
    .bdrv_co_discard = qcow2_co_discard,
    .bdrv_truncate = qcow2_truncate,
    .bdrv_write_compressed = qcow2_write_compressed,

    .bdrv_snapshot_create = qcow2_snapshot_create,
    .bdrv_snapshot_goto = qcow2_snapshot_goto,
    .bdrv_snapshot_delete = qcow2_snapshot_delete,
    .bdrv_snapshot_list = qcow2_snapshot_list,
    .bdrv_snapshot_load_tmp = qcow2_snapshot_load_tmp,
    .bdrv_get_info = qcow2_get_info,

    .bdrv_save_vmstate = qcow2_save_vmstate,
    .bdrv_load_vmstate = qcow2_load_vmstate,

    .bdrv_change_backing_file = qcow2_change_backing_file,

    .bdrv_invalidate_cache = qcow2_invalidate_cache,

    .create_options = qcow2_create_options,
    .bdrv_check = qcow2_check,
};

static void bdrv_qcow2_init(void)
{
    bdrv_register(&bdrv_qcow2);
}

block_init(bdrv_qcow2_init);