/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "block_int.h"
#include "module.h"
#include <zlib.h>
#include "aes.h"
#include "block/qcow2.h"
#include "qemu-error.h"
#include "qerror.h"

/*
  Differences with QCOW:

  - Support for multiple incremental snapshots.
  - Memory management by reference counts.
  - Clusters which have a reference count of one have the bit
    QCOW_OFLAG_COPIED to optimize write performance.
  - Size of compressed clusters is stored in sectors to reduce bit usage
    in the cluster offsets.
  - Support for storing additional data (such as the VM state) in the
    snapshots.
  - If a backing store is used, the cluster size is not constrained
    (could be backported to QCOW).
  - L2 tables always have a size of one cluster.
*/
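
/*
 * On-disk layout of a qcow2 header extension entry: a big-endian magic
 * value followed by a big-endian payload length. The payload follows the
 * pair and is padded to the next 8-byte boundary, which is why
 * qcow2_read_extensions() below advances by "(ext.len + 7) & ~7".
 */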

typedef struct {
    uint32_t magic;
    uint32_t len;
} QCowExtension;
#define QCOW2_EXT_MAGIC_END 0
#define QCOW2_EXT_MAGIC_BACKING_FORMAT 0xE2792ACA

static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    const QCowHeader *cow_header = (const void *)buf;

    if (buf_size >= sizeof(QCowHeader) &&
        be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
        be32_to_cpu(cow_header->version) >= QCOW_VERSION)
        return 100;
    else
        return 0;
}


/*
 * read qcow2 extension and fill bs
 * start reading from start_offset
 * finish reading upon magic of value 0 or when end_offset reached
 * unknown magic is skipped (future extension this version knows nothing about)
 * return 0 upon success, non-0 otherwise
 */
static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
                                 uint64_t end_offset)
{
    BDRVQcowState *s = bs->opaque;
    QCowExtension ext;
    uint64_t offset;
    int ret;

#ifdef DEBUG_EXT
    printf("qcow2_read_extensions: start=%ld end=%ld\n", start_offset, end_offset);
#endif
    offset = start_offset;
    while (offset < end_offset) {

#ifdef DEBUG_EXT
        /* Sanity check */
        if (offset > s->cluster_size)
            printf("qcow2_read_extension: suspicious offset %lu\n", offset);

        printf("attempting to read extended header in offset %lu\n", offset);
#endif

        if (bdrv_pread(bs->file, offset, &ext, sizeof(ext)) != sizeof(ext)) {
            fprintf(stderr, "qcow2_read_extension: ERROR: "
                    "pread fail from offset %" PRIu64 "\n",
                    offset);
            return 1;
        }
        be32_to_cpus(&ext.magic);
        be32_to_cpus(&ext.len);
        offset += sizeof(ext);
#ifdef DEBUG_EXT
        printf("ext.magic = 0x%x\n", ext.magic);
#endif
        if (ext.len > end_offset - offset) {
            error_report("Header extension too large");
            return -EINVAL;
        }

        switch (ext.magic) {
        case QCOW2_EXT_MAGIC_END:
            return 0;

        case QCOW2_EXT_MAGIC_BACKING_FORMAT:
            if (ext.len >= sizeof(bs->backing_format)) {
                fprintf(stderr, "ERROR: ext_backing_format: len=%u too large"
                        " (>=%zu)\n",
                        ext.len, sizeof(bs->backing_format));
                return 2;
            }
            if (bdrv_pread(bs->file, offset, bs->backing_format,
                           ext.len) != ext.len)
                return 3;
            bs->backing_format[ext.len] = '\0';
#ifdef DEBUG_EXT
            printf("Qcow2: Got format extension %s\n", bs->backing_format);
#endif
            break;

        default:
            /* unknown magic - save it in case we need to rewrite the header */
            {
                Qcow2UnknownHeaderExtension *uext;

                uext = g_malloc0(sizeof(*uext) + ext.len);
                uext->magic = ext.magic;
                uext->len = ext.len;
                QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next);

                ret = bdrv_pread(bs->file, offset, uext->data, uext->len);
                if (ret < 0) {
                    return ret;
                }
            }
            break;
        }

        offset += ((ext.len + 7) & ~7);
    }

    return 0;
}

static void cleanup_unknown_header_ext(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2UnknownHeaderExtension *uext, *next;

    QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) {
        QLIST_REMOVE(uext, next);
        g_free(uext);
    }
}

static int qcow2_open(BlockDriverState *bs, int flags)
{
    BDRVQcowState *s = bs->opaque;
    int len, i, ret = 0;
    QCowHeader header;
    uint64_t ext_end;
    bool writethrough;

    ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
    if (ret < 0) {
        goto fail;
    }
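
    /* All fixed header fields are stored big-endian on disk; convert them to
     * host byte order before running the sanity checks below. */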
    be32_to_cpus(&header.magic);
    be32_to_cpus(&header.version);
    be64_to_cpus(&header.backing_file_offset);
    be32_to_cpus(&header.backing_file_size);
    be64_to_cpus(&header.size);
    be32_to_cpus(&header.cluster_bits);
    be32_to_cpus(&header.crypt_method);
    be64_to_cpus(&header.l1_table_offset);
    be32_to_cpus(&header.l1_size);
    be64_to_cpus(&header.refcount_table_offset);
    be32_to_cpus(&header.refcount_table_clusters);
    be64_to_cpus(&header.snapshots_offset);
    be32_to_cpus(&header.nb_snapshots);

    if (header.magic != QCOW_MAGIC) {
        ret = -EINVAL;
        goto fail;
    }
    if (header.version != QCOW_VERSION) {
        char version[64];
        snprintf(version, sizeof(version), "QCOW version %d", header.version);
        qerror_report(QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
                      bs->device_name, "qcow2", version);
        ret = -ENOTSUP;
        goto fail;
    }
    if (header.cluster_bits < MIN_CLUSTER_BITS ||
        header.cluster_bits > MAX_CLUSTER_BITS) {
        ret = -EINVAL;
        goto fail;
    }
    if (header.crypt_method > QCOW_CRYPT_AES) {
        ret = -EINVAL;
        goto fail;
    }
    s->crypt_method_header = header.crypt_method;
    if (s->crypt_method_header) {
        bs->encrypted = 1;
    }
    s->cluster_bits = header.cluster_bits;
    s->cluster_size = 1 << s->cluster_bits;
    s->cluster_sectors = 1 << (s->cluster_bits - 9);
    s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
    s->l2_size = 1 << s->l2_bits;
    bs->total_sectors = header.size / 512;
    s->csize_shift = (62 - (s->cluster_bits - 8));
    s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
    s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
    s->refcount_table_offset = header.refcount_table_offset;
    s->refcount_table_size =
        header.refcount_table_clusters << (s->cluster_bits - 3);

    s->snapshots_offset = header.snapshots_offset;
    s->nb_snapshots = header.nb_snapshots;

    /* read the level 1 table */
    s->l1_size = header.l1_size;
    s->l1_vm_state_index = size_to_l1(s, header.size);
    /* the L1 table must contain at least enough entries to put
       header.size bytes */
    if (s->l1_size < s->l1_vm_state_index) {
        ret = -EINVAL;
        goto fail;
    }
    s->l1_table_offset = header.l1_table_offset;
    if (s->l1_size > 0) {
        s->l1_table = g_malloc0(
            align_offset(s->l1_size * sizeof(uint64_t), 512));
        ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
                         s->l1_size * sizeof(uint64_t));
        if (ret < 0) {
            goto fail;
        }
        for (i = 0; i < s->l1_size; i++) {
            be64_to_cpus(&s->l1_table[i]);
        }
    }

    /* alloc L2 table/refcount block cache */
    writethrough = ((flags & BDRV_O_CACHE_WB) == 0);
    s->l2_table_cache = qcow2_cache_create(bs, L2_CACHE_SIZE, writethrough);
    s->refcount_block_cache = qcow2_cache_create(bs, REFCOUNT_CACHE_SIZE,
                                                 writethrough);

    s->cluster_cache = g_malloc(s->cluster_size);
    /* one more sector for decompressed data alignment */
    s->cluster_data = qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
                                      + 512);
    s->cluster_cache_offset = -1;
    s->flags = flags;

    ret = qcow2_refcount_init(bs);
    if (ret != 0) {
        goto fail;
    }

    QLIST_INIT(&s->cluster_allocs);

    /* read qcow2 extensions */
    if (header.backing_file_offset) {
        ext_end = header.backing_file_offset;
    } else {
        ext_end = s->cluster_size;
    }
    if (qcow2_read_extensions(bs, sizeof(header), ext_end)) {
        ret = -EINVAL;
        goto fail;
    }

    /* read the backing file name */
    if (header.backing_file_offset != 0) {
        len = header.backing_file_size;
        if (len > 1023) {
            len = 1023;
        }
        ret = bdrv_pread(bs->file, header.backing_file_offset,
                         bs->backing_file, len);
        if (ret < 0) {
            goto fail;
        }
        bs->backing_file[len] = '\0';
    }

    ret = qcow2_read_snapshots(bs);
    if (ret < 0) {
        goto fail;
    }

    /* Initialise locks */
    qemu_co_mutex_init(&s->lock);

#ifdef DEBUG_ALLOC
    {
        BdrvCheckResult result = {0};
        qcow2_check_refcounts(bs, &result);
    }
#endif
    return ret;

fail:
    cleanup_unknown_header_ext(bs);
    qcow2_free_snapshots(bs);
    qcow2_refcount_close(bs);
    g_free(s->l1_table);
    if (s->l2_table_cache) {
        qcow2_cache_destroy(bs, s->l2_table_cache);
    }
    g_free(s->cluster_cache);
    qemu_vfree(s->cluster_data);
    return ret;
}

static int qcow2_set_key(BlockDriverState *bs, const char *key)
{
    BDRVQcowState *s = bs->opaque;
    uint8_t keybuf[16];
    int len, i;

    memset(keybuf, 0, 16);
    len = strlen(key);
    if (len > 16)
        len = 16;
    /* XXX: we could compress the chars to 7 bits to increase
       entropy */
    for (i = 0; i < len; i++) {
        keybuf[i] = key[i];
    }
    s->crypt_method = s->crypt_method_header;

    if (AES_set_encrypt_key(keybuf, 128, &s->aes_encrypt_key) != 0)
        return -1;
    if (AES_set_decrypt_key(keybuf, 128, &s->aes_decrypt_key) != 0)
        return -1;
#if 0
    /* test */
    {
        uint8_t in[16];
        uint8_t out[16];
        uint8_t tmp[16];
        for (i = 0; i < 16; i++)
            in[i] = i;
        AES_encrypt(in, tmp, &s->aes_encrypt_key);
        AES_decrypt(tmp, out, &s->aes_decrypt_key);
        for (i = 0; i < 16; i++)
            printf(" %02x", tmp[i]);
        printf("\n");
        for (i = 0; i < 16; i++)
            printf(" %02x", out[i]);
        printf("\n");
    }
#endif
    return 0;
}

static int coroutine_fn qcow2_co_is_allocated(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, int *pnum)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t cluster_offset;
    int ret;

    *pnum = nb_sectors;
    /* FIXME We can get errors here, but the bdrv_co_is_allocated interface
     * can't pass them on today */
    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_get_cluster_offset(bs, sector_num << 9, pnum, &cluster_offset);
    qemu_co_mutex_unlock(&s->lock);
    if (ret < 0) {
        *pnum = 0;
    }

    return (cluster_offset != 0);
}

/* handle reading after the end of the backing file */
int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov,
                        int64_t sector_num, int nb_sectors)
{
    int n1;
    if ((sector_num + nb_sectors) <= bs->total_sectors)
        return nb_sectors;
    if (sector_num >= bs->total_sectors)
        n1 = 0;
    else
        n1 = bs->total_sectors - sector_num;

    qemu_iovec_memset_skip(qiov, 0, 512 * (nb_sectors - n1), 512 * n1);

    return n1;
}

static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
                                       int remaining_sectors, QEMUIOVector *qiov)
{
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster, n1;
    int ret;
    int cur_nr_sectors; /* number of sectors in current iteration */
    uint64_t cluster_offset = 0;
    uint64_t bytes_done = 0;
    QEMUIOVector hd_qiov;
    uint8_t *cluster_data = NULL;

    qemu_iovec_init(&hd_qiov, qiov->niov);

    qemu_co_mutex_lock(&s->lock);

    while (remaining_sectors != 0) {

        /* prepare next request */
        cur_nr_sectors = remaining_sectors;
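        /* Encrypted requests are bounced through a buffer of at most
         * QCOW_MAX_CRYPT_CLUSTERS clusters, so clamp the iteration to that. */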
        if (s->crypt_method) {
            cur_nr_sectors = MIN(cur_nr_sectors,
                QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors);
        }

        ret = qcow2_get_cluster_offset(bs, sector_num << 9,
            &cur_nr_sectors, &cluster_offset);
        if (ret < 0) {
            goto fail;
        }

        index_in_cluster = sector_num & (s->cluster_sectors - 1);

        qemu_iovec_reset(&hd_qiov);
        qemu_iovec_copy(&hd_qiov, qiov, bytes_done,
            cur_nr_sectors * 512);

        if (!cluster_offset) {

            if (bs->backing_hd) {
                /* read from the base image */
                n1 = qcow2_backing_read1(bs->backing_hd, &hd_qiov,
                    sector_num, cur_nr_sectors);
                if (n1 > 0) {
                    BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
                    qemu_co_mutex_unlock(&s->lock);
                    ret = bdrv_co_readv(bs->backing_hd, sector_num,
                                        n1, &hd_qiov);
                    qemu_co_mutex_lock(&s->lock);
                    if (ret < 0) {
                        goto fail;
                    }
                }
            } else {
                /* Note: in this case, no need to wait */
                qemu_iovec_memset(&hd_qiov, 0, 512 * cur_nr_sectors);
            }
        } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
            /* add AIO support for compressed blocks ? */
            ret = qcow2_decompress_cluster(bs, cluster_offset);
            if (ret < 0) {
                goto fail;
            }

            qemu_iovec_from_buffer(&hd_qiov,
                s->cluster_cache + index_in_cluster * 512,
                512 * cur_nr_sectors);
        } else {
            if ((cluster_offset & 511) != 0) {
                ret = -EIO;
                goto fail;
            }

            if (s->crypt_method) {
                /*
                 * For encrypted images, read everything into a temporary
                 * contiguous buffer on which the AES functions can work.
                 */
                if (!cluster_data) {
                    cluster_data =
                        qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
                }

                assert(cur_nr_sectors <=
                    QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors);
                qemu_iovec_reset(&hd_qiov);
                qemu_iovec_add(&hd_qiov, cluster_data,
                    512 * cur_nr_sectors);
            }

            BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
            qemu_co_mutex_unlock(&s->lock);
            ret = bdrv_co_readv(bs->file,
                                (cluster_offset >> 9) + index_in_cluster,
                                cur_nr_sectors, &hd_qiov);
            qemu_co_mutex_lock(&s->lock);
            if (ret < 0) {
                goto fail;
            }
            if (s->crypt_method) {
                qcow2_encrypt_sectors(s, sector_num, cluster_data,
                    cluster_data, cur_nr_sectors, 0, &s->aes_decrypt_key);
                qemu_iovec_reset(&hd_qiov);
                qemu_iovec_copy(&hd_qiov, qiov, bytes_done,
                    cur_nr_sectors * 512);
                qemu_iovec_from_buffer(&hd_qiov, cluster_data,
                    512 * cur_nr_sectors);
            }
        }

        remaining_sectors -= cur_nr_sectors;
        sector_num += cur_nr_sectors;
        bytes_done += cur_nr_sectors * 512;
    }
    ret = 0;

fail:
    qemu_co_mutex_unlock(&s->lock);

    qemu_iovec_destroy(&hd_qiov);
    qemu_vfree(cluster_data);

    return ret;
}

static void run_dependent_requests(BDRVQcowState *s, QCowL2Meta *m)
{
    /* Take the request off the list of running requests */
    if (m->nb_clusters != 0) {
        QLIST_REMOVE(m, next_in_flight);
    }

    /* Restart all dependent requests */
    if (!qemu_co_queue_empty(&m->dependent_requests)) {
        qemu_co_mutex_unlock(&s->lock);
        qemu_co_queue_restart_all(&m->dependent_requests);
        qemu_co_mutex_lock(&s->lock);
    }
}

static coroutine_fn int qcow2_co_writev(BlockDriverState *bs,
                                        int64_t sector_num,
                                        int remaining_sectors,
                                        QEMUIOVector *qiov)
{
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster;
    int n_end;
    int ret;
    int cur_nr_sectors; /* number of sectors in current iteration */
    uint64_t cluster_offset;
    QEMUIOVector hd_qiov;
    uint64_t bytes_done = 0;
    uint8_t *cluster_data = NULL;
    QCowL2Meta l2meta = {
        .nb_clusters = 0,
    };

    qemu_co_queue_init(&l2meta.dependent_requests);

    qemu_iovec_init(&hd_qiov, qiov->niov);

    s->cluster_cache_offset = -1; /* disable compressed cache */

    qemu_co_mutex_lock(&s->lock);

    while (remaining_sectors != 0) {

        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        n_end = index_in_cluster + remaining_sectors;
        if (s->crypt_method &&
            n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors) {
            n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
        }

        ret = qcow2_alloc_cluster_offset(bs, sector_num << 9,
            index_in_cluster, n_end, &cur_nr_sectors, &l2meta);
        if (ret < 0) {
            goto fail;
        }

        cluster_offset = l2meta.cluster_offset;
        assert((cluster_offset & 511) == 0);

        qemu_iovec_reset(&hd_qiov);
        qemu_iovec_copy(&hd_qiov, qiov, bytes_done,
            cur_nr_sectors * 512);

        if (s->crypt_method) {
            if (!cluster_data) {
                cluster_data = qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS *
                                                   s->cluster_size);
            }

            assert(hd_qiov.size <=
                   QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
            qemu_iovec_to_buffer(&hd_qiov, cluster_data);

            qcow2_encrypt_sectors(s, sector_num, cluster_data,
                cluster_data, cur_nr_sectors, 1, &s->aes_encrypt_key);

            qemu_iovec_reset(&hd_qiov);
            qemu_iovec_add(&hd_qiov, cluster_data,
                cur_nr_sectors * 512);
        }

        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        qemu_co_mutex_unlock(&s->lock);
        ret = bdrv_co_writev(bs->file,
                             (cluster_offset >> 9) + index_in_cluster,
                             cur_nr_sectors, &hd_qiov);
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_alloc_cluster_link_l2(bs, &l2meta);
        if (ret < 0) {
            goto fail;
        }

        run_dependent_requests(s, &l2meta);

        remaining_sectors -= cur_nr_sectors;
        sector_num += cur_nr_sectors;
        bytes_done += cur_nr_sectors * 512;
    }
    ret = 0;

fail:
    run_dependent_requests(s, &l2meta);

    qemu_co_mutex_unlock(&s->lock);

    qemu_iovec_destroy(&hd_qiov);
    qemu_vfree(cluster_data);

    return ret;
}

static void qcow2_close(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    g_free(s->l1_table);

    qcow2_cache_flush(bs, s->l2_table_cache);
    qcow2_cache_flush(bs, s->refcount_block_cache);

    qcow2_cache_destroy(bs, s->l2_table_cache);
    qcow2_cache_destroy(bs, s->refcount_block_cache);

    cleanup_unknown_header_ext(bs);
    g_free(s->cluster_cache);
    qemu_vfree(s->cluster_data);
    qcow2_refcount_close(bs);
    qcow2_free_snapshots(bs);
}

static void qcow2_invalidate_cache(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int flags = s->flags;
    AES_KEY aes_encrypt_key;
    AES_KEY aes_decrypt_key;
    uint32_t crypt_method = 0;

    /*
     * Backing files are read-only, which makes all of their metadata
     * immutable; that means we don't have to worry about reopening them here.
     */
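    /* Save the encryption state so that it survives the close/reopen cycle
     * below; the key is only supplied at runtime via qcow2_set_key() and
     * cannot be recovered from the image file. */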

    if (s->crypt_method) {
        crypt_method = s->crypt_method;
        memcpy(&aes_encrypt_key, &s->aes_encrypt_key, sizeof(aes_encrypt_key));
        memcpy(&aes_decrypt_key, &s->aes_decrypt_key, sizeof(aes_decrypt_key));
    }

    qcow2_close(bs);

    memset(s, 0, sizeof(BDRVQcowState));
    qcow2_open(bs, flags);

    if (crypt_method) {
        s->crypt_method = crypt_method;
        memcpy(&s->aes_encrypt_key, &aes_encrypt_key, sizeof(aes_encrypt_key));
        memcpy(&s->aes_decrypt_key, &aes_decrypt_key, sizeof(aes_decrypt_key));
    }
}

static size_t header_ext_add(char *buf, uint32_t magic, const void *s,
    size_t len, size_t buflen)
{
    QCowExtension *ext_backing_fmt = (QCowExtension*) buf;
    size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7);

    if (buflen < ext_len) {
        return -ENOSPC;
    }

    *ext_backing_fmt = (QCowExtension) {
        .magic = cpu_to_be32(magic),
        .len = cpu_to_be32(len),
    };
    memcpy(buf + sizeof(QCowExtension), s, len);

    return ext_len;
}

/*
 * Updates the qcow2 header, including the variable length parts of it, i.e.
 * the backing file name and all extensions. qcow2 was not designed to allow
 * such changes, so if we run out of space (we can only use the first cluster)
 * this function may fail.
 *
 * Returns 0 on success, -errno in error cases.
 */
int qcow2_update_header(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    QCowHeader *header;
    char *buf;
    size_t buflen = s->cluster_size;
    int ret;
    uint64_t total_size;
    uint32_t refcount_table_clusters;
    Qcow2UnknownHeaderExtension *uext;

    buf = qemu_blockalign(bs, buflen);
    memset(buf, 0, s->cluster_size);

    /* Header structure */
    header = (QCowHeader*) buf;

    if (buflen < sizeof(*header)) {
        ret = -ENOSPC;
        goto fail;
    }

    total_size = bs->total_sectors * BDRV_SECTOR_SIZE;
    refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3);

    *header = (QCowHeader) {
        .magic = cpu_to_be32(QCOW_MAGIC),
        .version = cpu_to_be32(QCOW_VERSION),
        .backing_file_offset = 0,
        .backing_file_size = 0,
        .cluster_bits = cpu_to_be32(s->cluster_bits),
        .size = cpu_to_be64(total_size),
        .crypt_method = cpu_to_be32(s->crypt_method_header),
        .l1_size = cpu_to_be32(s->l1_size),
        .l1_table_offset = cpu_to_be64(s->l1_table_offset),
        .refcount_table_offset = cpu_to_be64(s->refcount_table_offset),
        .refcount_table_clusters = cpu_to_be32(refcount_table_clusters),
        .nb_snapshots = cpu_to_be32(s->nb_snapshots),
        .snapshots_offset = cpu_to_be64(s->snapshots_offset),
    };

    buf += sizeof(*header);
    buflen -= sizeof(*header);

    /* Backing file format header extension */
    if (*bs->backing_format) {
        ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT,
                             bs->backing_format, strlen(bs->backing_format),
                             buflen);
        if (ret < 0) {
            goto fail;
        }

        buf += ret;
        buflen -= ret;
    }

    /* Keep unknown header extensions */
    QLIST_FOREACH(uext, &s->unknown_header_ext, next) {
        ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen);
        if (ret < 0) {
            goto fail;
        }

        buf += ret;
        buflen -= ret;
    }

    /* End of header extensions */
    ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen);
    if (ret < 0) {
        goto fail;
    }

    buf += ret;
    buflen -= ret;
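
    /* Whatever is left of the first cluster after the extensions holds the
     * backing file name; its offset and length are recorded in the fixed
     * header fields below rather than with a terminating NUL. */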
    /* Backing file name */
    if (*bs->backing_file) {
        size_t backing_file_len = strlen(bs->backing_file);

        if (buflen < backing_file_len) {
            ret = -ENOSPC;
            goto fail;
        }

        strncpy(buf, bs->backing_file, buflen);

        header->backing_file_offset = cpu_to_be64(buf - ((char*) header));
        header->backing_file_size = cpu_to_be32(backing_file_len);
    }

    /* Write the new header */
    ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size);
    if (ret < 0) {
        goto fail;
    }

    ret = 0;
fail:
    qemu_vfree(header);
    return ret;
}

static int qcow2_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt)
{
    /* Backing file format doesn't make sense without a backing file */
    if (backing_fmt && !backing_file) {
        return -EINVAL;
    }

    pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
    pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");

    return qcow2_update_header(bs);
}

static int preallocate(BlockDriverState *bs)
{
    uint64_t nb_sectors;
    uint64_t offset;
    int num;
    int ret;
    QCowL2Meta meta;

    nb_sectors = bdrv_getlength(bs) >> 9;
    offset = 0;
    qemu_co_queue_init(&meta.dependent_requests);
    meta.cluster_offset = 0;

    while (nb_sectors) {
        num = MIN(nb_sectors, INT_MAX >> 9);
        ret = qcow2_alloc_cluster_offset(bs, offset, 0, num, &num, &meta);
        if (ret < 0) {
            return ret;
        }

        ret = qcow2_alloc_cluster_link_l2(bs, &meta);
        if (ret < 0) {
            qcow2_free_any_clusters(bs, meta.cluster_offset, meta.nb_clusters);
            return ret;
        }

        /* There are no dependent requests, but we need to remove our request
         * from the list of in-flight requests */
        run_dependent_requests(bs->opaque, &meta);

        /* TODO Preallocate data if requested */

        nb_sectors -= num;
        offset += num << 9;
    }

    /*
     * It is expected that the image file is large enough to actually contain
     * all of the allocated clusters (otherwise we get failing reads after
     * EOF). Extend the image to the last allocated sector.
     */
    if (meta.cluster_offset != 0) {
        uint8_t buf[512];
        memset(buf, 0, 512);
        ret = bdrv_write(bs->file, (meta.cluster_offset >> 9) + num - 1, buf, 1);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

static int qcow2_create2(const char *filename, int64_t total_size,
                         const char *backing_file, const char *backing_format,
                         int flags, size_t cluster_size, int prealloc,
                         QEMUOptionParameter *options)
{
    /* Calculate cluster_bits */
    int cluster_bits;
    cluster_bits = ffs(cluster_size) - 1;
    if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS ||
        (1 << cluster_bits) != cluster_size)
    {
        error_report(
            "Cluster size must be a power of two between %d and %dk",
            1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10));
        return -EINVAL;
    }

    /*
     * Open the image file and write a minimal qcow2 header.
     *
     * We keep things simple and start with a zero-sized image. We also
     * do without refcount blocks or an L1 table for now. We'll fix the
     * inconsistency later.
     *
     * We do need a refcount table because growing the refcount table means
     * allocating two new refcount blocks - the second of which would be at
     * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file
     * size for any qcow2 image.
     */
    BlockDriverState* bs;
    QCowHeader header;
    uint8_t* refcount_table;
    int ret;

    ret = bdrv_create_file(filename, options);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_file_open(&bs, filename, BDRV_O_RDWR);
    if (ret < 0) {
        return ret;
    }

    /* Write the header */
    memset(&header, 0, sizeof(header));
    header.magic = cpu_to_be32(QCOW_MAGIC);
    header.version = cpu_to_be32(QCOW_VERSION);
    header.cluster_bits = cpu_to_be32(cluster_bits);
    header.size = cpu_to_be64(0);
    header.l1_table_offset = cpu_to_be64(0);
    header.l1_size = cpu_to_be32(0);
    header.refcount_table_offset = cpu_to_be64(cluster_size);
    header.refcount_table_clusters = cpu_to_be32(1);

    if (flags & BLOCK_FLAG_ENCRYPT) {
        header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES);
    } else {
        header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
    }

    ret = bdrv_pwrite(bs, 0, &header, sizeof(header));
    if (ret < 0) {
        goto out;
    }

    /* Write an empty refcount table */
    refcount_table = g_malloc0(cluster_size);
    ret = bdrv_pwrite(bs, cluster_size, refcount_table, cluster_size);
    g_free(refcount_table);

    if (ret < 0) {
        goto out;
    }

    bdrv_close(bs);

    /*
     * And now open the image and make it consistent first (i.e. increase the
     * refcount of the cluster that is occupied by the header and the refcount
     * table)
     */
    BlockDriver* drv = bdrv_find_format("qcow2");
    assert(drv != NULL);
    ret = bdrv_open(bs, filename,
        BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH, drv);
    if (ret < 0) {
        goto out;
    }

    ret = qcow2_alloc_clusters(bs, 2 * cluster_size);
    if (ret < 0) {
        goto out;

    } else if (ret != 0) {
        error_report("Huh, first cluster in empty image is already in use?");
        abort();
    }

    /* Okay, now that we have a valid image, let's give it the right size */
    ret = bdrv_truncate(bs, total_size * BDRV_SECTOR_SIZE);
    if (ret < 0) {
        goto out;
    }

    /* Want a backing file? There you go. */
    if (backing_file) {
        ret = bdrv_change_backing_file(bs, backing_file, backing_format);
        if (ret < 0) {
            goto out;
        }
    }

    /* And if we're supposed to preallocate metadata, do that now */
    if (prealloc) {
        ret = preallocate(bs);
        if (ret < 0) {
            goto out;
        }
    }

    ret = 0;
out:
    bdrv_delete(bs);
    return ret;
}

static int qcow2_create(const char *filename, QEMUOptionParameter *options)
{
    const char *backing_file = NULL;
    const char *backing_fmt = NULL;
    uint64_t sectors = 0;
    int flags = 0;
    size_t cluster_size = DEFAULT_CLUSTER_SIZE;
    int prealloc = 0;

    /* Read out options */
    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            sectors = options->value.n / 512;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
            backing_fmt = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_ENCRYPT)) {
            flags |= options->value.n ? BLOCK_FLAG_ENCRYPT : 0;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                cluster_size = options->value.n;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_PREALLOC)) {
            if (!options->value.s || !strcmp(options->value.s, "off")) {
                prealloc = 0;
            } else if (!strcmp(options->value.s, "metadata")) {
                prealloc = 1;
            } else {
                fprintf(stderr, "Invalid preallocation mode: '%s'\n",
                    options->value.s);
                return -EINVAL;
            }
        }
        options++;
    }

    if (backing_file && prealloc) {
        fprintf(stderr, "Backing file and preallocation cannot be used at "
            "the same time\n");
        return -EINVAL;
    }

    return qcow2_create2(filename, sectors, backing_file, backing_fmt, flags,
                         cluster_size, prealloc, options);
}

static int qcow2_make_empty(BlockDriverState *bs)
{
#if 0
    /* XXX: not correct */
    BDRVQcowState *s = bs->opaque;
    uint32_t l1_length = s->l1_size * sizeof(uint64_t);
    int ret;

    memset(s->l1_table, 0, l1_length);
    if (bdrv_pwrite(bs->file, s->l1_table_offset, s->l1_table, l1_length) < 0)
        return -1;
    ret = bdrv_truncate(bs->file, s->l1_table_offset + l1_length);
    if (ret < 0)
        return ret;

    l2_cache_reset(bs);
#endif
    return 0;
}

static coroutine_fn int qcow2_co_discard(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors)
{
    int ret;
    BDRVQcowState *s = bs->opaque;

    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_discard_clusters(bs, sector_num << BDRV_SECTOR_BITS,
        nb_sectors);
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}

static int qcow2_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, new_l1_size;

    if (offset & 511) {
        return -EINVAL;
    }

    /* cannot proceed if image has snapshots */
    if (s->nb_snapshots) {
        return -ENOTSUP;
    }

    /* shrinking is currently not supported */
    if (offset < bs->total_sectors * 512) {
        return -ENOTSUP;
    }

    new_l1_size = size_to_l1(s, offset);
    ret = qcow2_grow_l1_table(bs, new_l1_size, true);
    if (ret < 0) {
        return ret;
    }

    /* write updated header.size */
    offset = cpu_to_be64(offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size),
                           &offset, sizeof(uint64_t));
    if (ret < 0) {
        return ret;
    }

    s->l1_vm_state_index = new_l1_size;
    return 0;
}

/* XXX: put compressed sectors first, then all the cluster aligned
   tables to avoid losing bytes in alignment */
static int qcow2_write_compressed(BlockDriverState *bs, int64_t sector_num,
                                  const uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    z_stream strm;
    int ret, out_len;
    uint8_t *out_buf;
    uint64_t cluster_offset;

    if (nb_sectors == 0) {
        /* align end of file to a sector boundary to ease reading with
           sector based I/Os */
        cluster_offset = bdrv_getlength(bs->file);
        cluster_offset = (cluster_offset + 511) & ~511;
        bdrv_truncate(bs->file, cluster_offset);
        return 0;
    }

    if (nb_sectors != s->cluster_sectors)
        return -EINVAL;

    out_buf = g_malloc(s->cluster_size + (s->cluster_size / 1000) + 128);

    /* best compression, small window, no zlib header */
    memset(&strm, 0, sizeof(strm));
    ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION,
                       Z_DEFLATED, -12,
                       9, Z_DEFAULT_STRATEGY);
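    /* Negative windowBits (-12) selects a raw deflate stream without a zlib
     * header or trailer, which is what the qcow2 decompression path expects. */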
    if (ret != 0) {
        ret = -EINVAL;
        goto fail;
    }

    strm.avail_in = s->cluster_size;
    strm.next_in = (uint8_t *)buf;
    strm.avail_out = s->cluster_size;
    strm.next_out = out_buf;

    ret = deflate(&strm, Z_FINISH);
    if (ret != Z_STREAM_END && ret != Z_OK) {
        deflateEnd(&strm);
        ret = -EINVAL;
        goto fail;
    }
    out_len = strm.next_out - out_buf;

    deflateEnd(&strm);

    if (ret != Z_STREAM_END || out_len >= s->cluster_size) {
        /* could not compress: write normal cluster */
        ret = bdrv_write(bs, sector_num, buf, s->cluster_sectors);
        if (ret < 0) {
            goto fail;
        }
    } else {
        cluster_offset = qcow2_alloc_compressed_cluster_offset(bs,
            sector_num << 9, out_len);
        if (!cluster_offset) {
            ret = -EIO;
            goto fail;
        }
        cluster_offset &= s->cluster_offset_mask;
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED);
        ret = bdrv_pwrite(bs->file, cluster_offset, out_buf, out_len);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;
fail:
    g_free(out_buf);
    return ret;
}

static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        qemu_co_mutex_unlock(&s->lock);
        return ret;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        qemu_co_mutex_unlock(&s->lock);
        return ret;
    }
    qemu_co_mutex_unlock(&s->lock);

    return 0;
}

static coroutine_fn int qcow2_co_flush_to_disk(BlockDriverState *bs)
{
    return bdrv_co_flush(bs->file);
}

static int64_t qcow2_vm_state_offset(BDRVQcowState *s)
{
    return (int64_t)s->l1_vm_state_index << (s->cluster_bits + s->l2_bits);
}

static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQcowState *s = bs->opaque;
    bdi->cluster_size = s->cluster_size;
    bdi->vm_state_offset = qcow2_vm_state_offset(s);
    return 0;
}


static int qcow2_check(BlockDriverState *bs, BdrvCheckResult *result)
{
    return qcow2_check_refcounts(bs, result);
}

#if 0
static void dump_refcounts(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int64_t nb_clusters, k, k1, size;
    int refcount;

    size = bdrv_getlength(bs->file);
    nb_clusters = size_to_clusters(s, size);
    for (k = 0; k < nb_clusters;) {
        k1 = k;
        refcount = get_refcount(bs, k);
        k++;
        while (k < nb_clusters && get_refcount(bs, k) == refcount)
            k++;
        printf("%" PRId64 ": refcount=%d nb=%" PRId64 "\n", k, refcount,
               k - k1);
    }
}
#endif

static int qcow2_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                              int64_t pos, int size)
{
    BDRVQcowState *s = bs->opaque;
    int growable = bs->growable;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE);
    bs->growable = 1;
    ret = bdrv_pwrite(bs, qcow2_vm_state_offset(s) + pos, buf, size);
    bs->growable = growable;

    return ret;
}

static int qcow2_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                              int64_t pos, int size)
{
    BDRVQcowState *s = bs->opaque;
    int growable = bs->growable;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
    bs->growable = 1;
    ret = bdrv_pread(bs, qcow2_vm_state_offset(s) + pos, buf, size);
    bs->growable = growable;

    return ret;
}

static QEMUOptionParameter qcow2_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size"
    },
    {
        .name = BLOCK_OPT_BACKING_FILE,
        .type = OPT_STRING,
        .help = "File name of a base image"
    },
    {
        .name = BLOCK_OPT_BACKING_FMT,
        .type = OPT_STRING,
        .help = "Image format of the base image"
    },
    {
        .name = BLOCK_OPT_ENCRYPT,
        .type = OPT_FLAG,
        .help = "Encrypt the image"
    },
    {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .type = OPT_SIZE,
        .help = "qcow2 cluster size",
        .value = { .n = DEFAULT_CLUSTER_SIZE },
    },
    {
        .name = BLOCK_OPT_PREALLOC,
        .type = OPT_STRING,
        .help = "Preallocation mode (allowed values: off, metadata)"
    },
    { NULL }
};

static BlockDriver bdrv_qcow2 = {
    .format_name = "qcow2",
    .instance_size = sizeof(BDRVQcowState),
    .bdrv_probe = qcow2_probe,
    .bdrv_open = qcow2_open,
    .bdrv_close = qcow2_close,
    .bdrv_create = qcow2_create,
    .bdrv_co_is_allocated = qcow2_co_is_allocated,
    .bdrv_set_key = qcow2_set_key,
    .bdrv_make_empty = qcow2_make_empty,

    .bdrv_co_readv = qcow2_co_readv,
    .bdrv_co_writev = qcow2_co_writev,
    .bdrv_co_flush_to_os = qcow2_co_flush_to_os,
    .bdrv_co_flush_to_disk = qcow2_co_flush_to_disk,

    .bdrv_co_discard = qcow2_co_discard,
    .bdrv_truncate = qcow2_truncate,
    .bdrv_write_compressed = qcow2_write_compressed,

    .bdrv_snapshot_create = qcow2_snapshot_create,
    .bdrv_snapshot_goto = qcow2_snapshot_goto,
    .bdrv_snapshot_delete = qcow2_snapshot_delete,
    .bdrv_snapshot_list = qcow2_snapshot_list,
    .bdrv_snapshot_load_tmp = qcow2_snapshot_load_tmp,
    .bdrv_get_info = qcow2_get_info,

    .bdrv_save_vmstate = qcow2_save_vmstate,
    .bdrv_load_vmstate = qcow2_load_vmstate,

    .bdrv_change_backing_file = qcow2_change_backing_file,

    .bdrv_invalidate_cache = qcow2_invalidate_cache,

    .create_options = qcow2_create_options,
    .bdrv_check = qcow2_check,
};

static void bdrv_qcow2_init(void)
{
    bdrv_register(&bdrv_qcow2);
}

block_init(bdrv_qcow2_init);