/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <zlib.h>

#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "qemu/bswap.h"
#include "trace.h"

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size) {
        return 0;
    }

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow;
         * each round grows the table by roughly a factor of 1.5 */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

    QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
    if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs,
                                       align_offset(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, align_offset(new_l1_size2, 512));

    if (s->l1_size) {
        memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
    }

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2);
    if (ret < 0) {
        goto fail;
    }

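    /* The qcow2 on-disk format is big-endian, so convert the L1 entries in
     * place for the write below and convert them back afterwards so that
     * the in-memory table stays in host byte order. */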
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    stl_be_p(data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * Loads a L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success, -errno on failure; on success, *l2_table points
 * to the loaded L2 table.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
                   uint64_t **l2_table)
{
    BDRVQcow2State *s = bs->opaque;

    return qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                           (void **)l2_table);
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++)
    {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file,
                           s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L2 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
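 *
 * Returns 0 on success, -errno in error cases; on success, *table points
 * to the new L2 table.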
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table = NULL;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset,
                                (void **) table);
    if (ret < 0) {
        goto fail;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t *old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
                              old_l2_offset & L1E_OFFSET_MASK,
                              (void **) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        qcow2_cache_put(bs, s->l2_table_cache, (void **) &old_table);
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_table != NULL) {
        qcow2_cache_put(bs, s->l2_table_cache, (void **) table);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}

/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster which may require different handling)
 */
static int count_contiguous_clusters(int nb_clusters, int cluster_size,
                                     uint64_t *l2_table, uint64_t stop_flags)
{
    int i;
    QCow2ClusterType first_cluster_type;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_table[0]);
    uint64_t offset = first_entry & mask;

    if (!offset) {
        return 0;
    }

    /* must be allocated */
    first_cluster_type = qcow2_get_cluster_type(first_entry);
    assert(first_cluster_type == QCOW2_CLUSTER_NORMAL ||
           first_cluster_type == QCOW2_CLUSTER_ZERO_ALLOC);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}

/*
 * Checks how many consecutive unallocated clusters in a given L2
 * table have the same cluster type.
 */
static int count_contiguous_clusters_unallocated(int nb_clusters,
                                                 uint64_t *l2_table,
                                                 QCow2ClusterType wanted_type)
{
    int i;

    assert(wanted_type == QCOW2_CLUSTER_ZERO_PLAIN ||
           wanted_type == QCOW2_CLUSTER_UNALLOCATED);
    for (i = 0; i < nb_clusters; i++) {
        uint64_t entry = be64_to_cpu(l2_table[i]);
        QCow2ClusterType type = qcow2_get_cluster_type(entry);

        if (type != wanted_type) {
            break;
        }
    }

    return i;
}

/* The crypt function is compatible with the Linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num,
                          uint8_t *out_buf, const uint8_t *in_buf,
                          int nb_sectors, bool enc,
                          Error **errp)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;
    int ret;

    for (i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        if (qcrypto_cipher_setiv(s->cipher,
                                 ivec.b, G_N_ELEMENTS(ivec.b),
                                 errp) < 0) {
            return -1;
        }
        if (enc) {
            ret = qcrypto_cipher_encrypt(s->cipher,
                                         in_buf,
                                         out_buf,
                                         512,
                                         errp);
        } else {
            ret = qcrypto_cipher_decrypt(s->cipher,
                                         in_buf,
                                         out_buf,
                                         512,
                                         errp);
        }
        if (ret < 0) {
            return -1;
        }
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
    return 0;
}

static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
                                            uint64_t src_cluster_offset,
                                            unsigned offset_in_cluster,
                                            QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    /* Call .bdrv_co_preadv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
426 */ 427 ret = bs->drv->bdrv_co_preadv(bs, src_cluster_offset + offset_in_cluster, 428 qiov->size, qiov, 0); 429 if (ret < 0) { 430 return ret; 431 } 432 433 return 0; 434 } 435 436 static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs, 437 uint64_t src_cluster_offset, 438 unsigned offset_in_cluster, 439 uint8_t *buffer, 440 unsigned bytes) 441 { 442 if (bytes && bs->encrypted) { 443 BDRVQcow2State *s = bs->opaque; 444 int64_t sector = (src_cluster_offset + offset_in_cluster) 445 >> BDRV_SECTOR_BITS; 446 assert(s->cipher); 447 assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0); 448 assert((bytes & ~BDRV_SECTOR_MASK) == 0); 449 if (qcow2_encrypt_sectors(s, sector, buffer, buffer, 450 bytes >> BDRV_SECTOR_BITS, true, NULL) < 0) { 451 return false; 452 } 453 } 454 return true; 455 } 456 457 static int coroutine_fn do_perform_cow_write(BlockDriverState *bs, 458 uint64_t cluster_offset, 459 unsigned offset_in_cluster, 460 QEMUIOVector *qiov) 461 { 462 int ret; 463 464 if (qiov->size == 0) { 465 return 0; 466 } 467 468 ret = qcow2_pre_write_overlap_check(bs, 0, 469 cluster_offset + offset_in_cluster, qiov->size); 470 if (ret < 0) { 471 return ret; 472 } 473 474 BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE); 475 ret = bdrv_co_pwritev(bs->file, cluster_offset + offset_in_cluster, 476 qiov->size, qiov, 0); 477 if (ret < 0) { 478 return ret; 479 } 480 481 return 0; 482 } 483 484 485 /* 486 * get_cluster_offset 487 * 488 * For a given offset of the virtual disk, find the cluster type and offset in 489 * the qcow2 file. The offset is stored in *cluster_offset. 490 * 491 * On entry, *bytes is the maximum number of contiguous bytes starting at 492 * offset that we are interested in. 493 * 494 * On exit, *bytes is the number of bytes starting at offset that have the same 495 * cluster type and (if applicable) are stored contiguously in the image file. 496 * Compressed clusters are always returned one by one. 497 * 498 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error 499 * cases. 
500 */ 501 int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset, 502 unsigned int *bytes, uint64_t *cluster_offset) 503 { 504 BDRVQcow2State *s = bs->opaque; 505 unsigned int l2_index; 506 uint64_t l1_index, l2_offset, *l2_table; 507 int l1_bits, c; 508 unsigned int offset_in_cluster; 509 uint64_t bytes_available, bytes_needed, nb_clusters; 510 QCow2ClusterType type; 511 int ret; 512 513 offset_in_cluster = offset_into_cluster(s, offset); 514 bytes_needed = (uint64_t) *bytes + offset_in_cluster; 515 516 l1_bits = s->l2_bits + s->cluster_bits; 517 518 /* compute how many bytes there are between the start of the cluster 519 * containing offset and the end of the l1 entry */ 520 bytes_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1)) 521 + offset_in_cluster; 522 523 if (bytes_needed > bytes_available) { 524 bytes_needed = bytes_available; 525 } 526 527 *cluster_offset = 0; 528 529 /* seek to the l2 offset in the l1 table */ 530 531 l1_index = offset >> l1_bits; 532 if (l1_index >= s->l1_size) { 533 type = QCOW2_CLUSTER_UNALLOCATED; 534 goto out; 535 } 536 537 l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK; 538 if (!l2_offset) { 539 type = QCOW2_CLUSTER_UNALLOCATED; 540 goto out; 541 } 542 543 if (offset_into_cluster(s, l2_offset)) { 544 qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64 545 " unaligned (L1 index: %#" PRIx64 ")", 546 l2_offset, l1_index); 547 return -EIO; 548 } 549 550 /* load the l2 table in memory */ 551 552 ret = l2_load(bs, l2_offset, &l2_table); 553 if (ret < 0) { 554 return ret; 555 } 556 557 /* find the cluster offset for the given disk offset */ 558 559 l2_index = offset_to_l2_index(s, offset); 560 *cluster_offset = be64_to_cpu(l2_table[l2_index]); 561 562 nb_clusters = size_to_clusters(s, bytes_needed); 563 /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned 564 * integers; the minimum cluster size is 512, so this assertion is always 565 * true */ 566 assert(nb_clusters <= INT_MAX); 567 568 type = qcow2_get_cluster_type(*cluster_offset); 569 if (s->qcow_version < 3 && (type == QCOW2_CLUSTER_ZERO_PLAIN || 570 type == QCOW2_CLUSTER_ZERO_ALLOC)) { 571 qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found" 572 " in pre-v3 image (L2 offset: %#" PRIx64 573 ", L2 index: %#x)", l2_offset, l2_index); 574 ret = -EIO; 575 goto fail; 576 } 577 switch (type) { 578 case QCOW2_CLUSTER_COMPRESSED: 579 /* Compressed clusters can only be processed one by one */ 580 c = 1; 581 *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK; 582 break; 583 case QCOW2_CLUSTER_ZERO_PLAIN: 584 case QCOW2_CLUSTER_UNALLOCATED: 585 /* how many empty clusters ? */ 586 c = count_contiguous_clusters_unallocated(nb_clusters, 587 &l2_table[l2_index], type); 588 *cluster_offset = 0; 589 break; 590 case QCOW2_CLUSTER_ZERO_ALLOC: 591 case QCOW2_CLUSTER_NORMAL: 592 /* how many allocated clusters ? 
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "Cluster allocation offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    bytes_available = (int64_t)c * s->cluster_size;

out:
    if (bytes_available > bytes_needed) {
        bytes_available = bytes_needed;
    }

    /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
     * subtracting offset_in_cluster will therefore definitely yield something
     * not exceeding UINT_MAX */
    assert(bytes_available - offset_in_cluster <= UINT_MAX);
    *bytes = bytes_available - offset_in_cluster;

    return type;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
    return ret;
}

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_index(s, offset);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Returns the cluster offset if successful, and 0 otherwise.
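 *
 * On success the returned value is the complete L2 entry for the compressed
 * cluster, i.e. it already has QCOW_OFLAG_COMPRESSED set and the number of
 * compressed sectors encoded into it.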
714 * 715 */ 716 717 uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, 718 uint64_t offset, 719 int compressed_size) 720 { 721 BDRVQcow2State *s = bs->opaque; 722 int l2_index, ret; 723 uint64_t *l2_table; 724 int64_t cluster_offset; 725 int nb_csectors; 726 727 ret = get_cluster_table(bs, offset, &l2_table, &l2_index); 728 if (ret < 0) { 729 return 0; 730 } 731 732 /* Compression can't overwrite anything. Fail if the cluster was already 733 * allocated. */ 734 cluster_offset = be64_to_cpu(l2_table[l2_index]); 735 if (cluster_offset & L2E_OFFSET_MASK) { 736 qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); 737 return 0; 738 } 739 740 cluster_offset = qcow2_alloc_bytes(bs, compressed_size); 741 if (cluster_offset < 0) { 742 qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); 743 return 0; 744 } 745 746 nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) - 747 (cluster_offset >> 9); 748 749 cluster_offset |= QCOW_OFLAG_COMPRESSED | 750 ((uint64_t)nb_csectors << s->csize_shift); 751 752 /* update L2 table */ 753 754 /* compressed clusters never have the copied flag */ 755 756 BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED); 757 qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table); 758 l2_table[l2_index] = cpu_to_be64(cluster_offset); 759 qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table); 760 761 return cluster_offset; 762 } 763 764 static int perform_cow(BlockDriverState *bs, QCowL2Meta *m) 765 { 766 BDRVQcow2State *s = bs->opaque; 767 Qcow2COWRegion *start = &m->cow_start; 768 Qcow2COWRegion *end = &m->cow_end; 769 unsigned buffer_size; 770 unsigned data_bytes = end->offset - (start->offset + start->nb_bytes); 771 bool merge_reads; 772 uint8_t *start_buffer, *end_buffer; 773 QEMUIOVector qiov; 774 int ret; 775 776 assert(start->nb_bytes <= UINT_MAX - end->nb_bytes); 777 assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes); 778 assert(start->offset + start->nb_bytes <= end->offset); 779 assert(!m->data_qiov || m->data_qiov->size == data_bytes); 780 781 if (start->nb_bytes == 0 && end->nb_bytes == 0) { 782 return 0; 783 } 784 785 /* If we have to read both the start and end COW regions and the 786 * middle region is not too large then perform just one read 787 * operation */ 788 merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384; 789 if (merge_reads) { 790 buffer_size = start->nb_bytes + data_bytes + end->nb_bytes; 791 } else { 792 /* If we have to do two reads, add some padding in the middle 793 * if necessary to make sure that the end region is optimally 794 * aligned. */ 795 size_t align = bdrv_opt_mem_align(bs); 796 assert(align > 0 && align <= UINT_MAX); 797 assert(QEMU_ALIGN_UP(start->nb_bytes, align) <= 798 UINT_MAX - end->nb_bytes); 799 buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes; 800 } 801 802 /* Reserve a buffer large enough to store all the data that we're 803 * going to read */ 804 start_buffer = qemu_try_blockalign(bs, buffer_size); 805 if (start_buffer == NULL) { 806 return -ENOMEM; 807 } 808 /* The part of the buffer where the end region is located */ 809 end_buffer = start_buffer + buffer_size - end->nb_bytes; 810 811 qemu_iovec_init(&qiov, 2 + (m->data_qiov ? m->data_qiov->niov : 0)); 812 813 qemu_co_mutex_unlock(&s->lock); 814 /* First we read the existing data from both COW regions. We 815 * either read the whole region in one go, or the start and end 816 * regions separately. 
     */
    if (merge_reads) {
        qemu_iovec_add(&qiov, start_buffer, buffer_size);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
    } else {
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
    }
    if (ret < 0) {
        goto fail;
    }

    /* Encrypt the data if necessary before writing it */
    if (bs->encrypted) {
        if (!do_perform_cow_encrypt(bs, m->offset, start->offset,
                                    start_buffer, start->nb_bytes) ||
            !do_perform_cow_encrypt(bs, m->offset, end->offset,
                                    end_buffer, end->nb_bytes)) {
            ret = -EIO;
            goto fail;
        }
    }

    /* And now we can write everything. If we have the guest data we
     * can write everything in one single operation */
    if (m->data_qiov) {
        qemu_iovec_reset(&qiov);
        if (start->nb_bytes) {
            qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        }
        qemu_iovec_concat(&qiov, m->data_qiov, 0, data_bytes);
        if (end->nb_bytes) {
            qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        }
        /* NOTE: we have a write_aio blkdebug event here followed by
         * a cow_write one in do_perform_cow_write(), but there's only
         * one single I/O operation */
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
    } else {
        /* If there's no guest data then write both COW regions separately */
        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
    }

fail:
    qemu_co_mutex_lock(&s->lock);

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (ret == 0) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qemu_vfree(start_buffer);
    qemu_iovec_destroy(&qiov);
    return ret;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
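    /* The L2 entries must not hit the disk before the refcount update that
     * covers the new clusters: with lazy refcounts the image is marked
     * dirty instead, otherwise the L2 table cache is made to depend on the
     * refcount block cache being flushed first. */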
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);

    assert(l2_index + m->nb_clusters <= s->l2_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by perform_cow()), update the L2 table with its cluster
         * pointer and free the old cluster. This is what this loop does */
        if (l2_table[l2_index + i] != 0) {
            old_cluster[j++] = l2_table[l2_index + i];
        }

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }


    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (!m->keep_old_clusters && j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must be copied from the backing file)
 */
static int count_cow_clusters(BDRVQcow2State *s, int nb_clusters,
                              uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);

        switch (cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO_PLAIN:
        case QCOW2_CLUSTER_ZERO_ALLOC:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
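 *
 * For example, if the current request covers guest offsets 0..128k and an
 * allocation covering 64k..128k is already in flight, *cur_bytes is
 * shortened to 64k so that the non-overlapping head of the request can
 * proceed first.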
1016 */ 1017 static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset, 1018 uint64_t *cur_bytes, QCowL2Meta **m) 1019 { 1020 BDRVQcow2State *s = bs->opaque; 1021 QCowL2Meta *old_alloc; 1022 uint64_t bytes = *cur_bytes; 1023 1024 QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) { 1025 1026 uint64_t start = guest_offset; 1027 uint64_t end = start + bytes; 1028 uint64_t old_start = l2meta_cow_start(old_alloc); 1029 uint64_t old_end = l2meta_cow_end(old_alloc); 1030 1031 if (end <= old_start || start >= old_end) { 1032 /* No intersection */ 1033 } else { 1034 if (start < old_start) { 1035 /* Stop at the start of a running allocation */ 1036 bytes = old_start - start; 1037 } else { 1038 bytes = 0; 1039 } 1040 1041 /* Stop if already an l2meta exists. After yielding, it wouldn't 1042 * be valid any more, so we'd have to clean up the old L2Metas 1043 * and deal with requests depending on them before starting to 1044 * gather new ones. Not worth the trouble. */ 1045 if (bytes == 0 && *m) { 1046 *cur_bytes = 0; 1047 return 0; 1048 } 1049 1050 if (bytes == 0) { 1051 /* Wait for the dependency to complete. We need to recheck 1052 * the free/allocated clusters when we continue. */ 1053 qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock); 1054 return -EAGAIN; 1055 } 1056 } 1057 } 1058 1059 /* Make sure that existing clusters and new allocations are only used up to 1060 * the next dependency if we shortened the request above */ 1061 *cur_bytes = bytes; 1062 1063 return 0; 1064 } 1065 1066 /* 1067 * Checks how many already allocated clusters that don't require a copy on 1068 * write there are at the given guest_offset (up to *bytes). If 1069 * *host_offset is not zero, only physically contiguous clusters beginning at 1070 * this host offset are counted. 1071 * 1072 * Note that guest_offset may not be cluster aligned. In this case, the 1073 * returned *host_offset points to exact byte referenced by guest_offset and 1074 * therefore isn't cluster aligned as well. 1075 * 1076 * Returns: 1077 * 0: if no allocated clusters are available at the given offset. 1078 * *bytes is normally unchanged. It is set to 0 if the cluster 1079 * is allocated and doesn't need COW, but doesn't have the right 1080 * physical offset. 1081 * 1082 * 1: if allocated clusters that don't require a COW are available at 1083 * the requested offset. *bytes may have decreased and describes 1084 * the length of the area that can be written to. 1085 * 1086 * -errno: in error cases 1087 */ 1088 static int handle_copied(BlockDriverState *bs, uint64_t guest_offset, 1089 uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m) 1090 { 1091 BDRVQcow2State *s = bs->opaque; 1092 int l2_index; 1093 uint64_t cluster_offset; 1094 uint64_t *l2_table; 1095 uint64_t nb_clusters; 1096 unsigned int keep_clusters; 1097 int ret; 1098 1099 trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset, 1100 *bytes); 1101 1102 assert(*host_offset == 0 || offset_into_cluster(s, guest_offset) 1103 == offset_into_cluster(s, *host_offset)); 1104 1105 /* 1106 * Calculate the number of clusters to look for. We stop at L2 table 1107 * boundaries to keep things simple. 
1108 */ 1109 nb_clusters = 1110 size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes); 1111 1112 l2_index = offset_to_l2_index(s, guest_offset); 1113 nb_clusters = MIN(nb_clusters, s->l2_size - l2_index); 1114 assert(nb_clusters <= INT_MAX); 1115 1116 /* Find L2 entry for the first involved cluster */ 1117 ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index); 1118 if (ret < 0) { 1119 return ret; 1120 } 1121 1122 cluster_offset = be64_to_cpu(l2_table[l2_index]); 1123 1124 /* Check how many clusters are already allocated and don't need COW */ 1125 if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL 1126 && (cluster_offset & QCOW_OFLAG_COPIED)) 1127 { 1128 /* If a specific host_offset is required, check it */ 1129 bool offset_matches = 1130 (cluster_offset & L2E_OFFSET_MASK) == *host_offset; 1131 1132 if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) { 1133 qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset " 1134 "%#llx unaligned (guest offset: %#" PRIx64 1135 ")", cluster_offset & L2E_OFFSET_MASK, 1136 guest_offset); 1137 ret = -EIO; 1138 goto out; 1139 } 1140 1141 if (*host_offset != 0 && !offset_matches) { 1142 *bytes = 0; 1143 ret = 0; 1144 goto out; 1145 } 1146 1147 /* We keep all QCOW_OFLAG_COPIED clusters */ 1148 keep_clusters = 1149 count_contiguous_clusters(nb_clusters, s->cluster_size, 1150 &l2_table[l2_index], 1151 QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO); 1152 assert(keep_clusters <= nb_clusters); 1153 1154 *bytes = MIN(*bytes, 1155 keep_clusters * s->cluster_size 1156 - offset_into_cluster(s, guest_offset)); 1157 1158 ret = 1; 1159 } else { 1160 ret = 0; 1161 } 1162 1163 /* Cleanup */ 1164 out: 1165 qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table); 1166 1167 /* Only return a host offset if we actually made progress. Otherwise we 1168 * would make requirements for handle_alloc() that it can't fulfill */ 1169 if (ret > 0) { 1170 *host_offset = (cluster_offset & L2E_OFFSET_MASK) 1171 + offset_into_cluster(s, guest_offset); 1172 } 1173 1174 return ret; 1175 } 1176 1177 /* 1178 * Allocates new clusters for the given guest_offset. 1179 * 1180 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to 1181 * contain the number of clusters that have been allocated and are contiguous 1182 * in the image file. 1183 * 1184 * If *host_offset is non-zero, it specifies the offset in the image file at 1185 * which the new clusters must start. *nb_clusters can be 0 on return in this 1186 * case if the cluster at host_offset is already in use. If *host_offset is 1187 * zero, the clusters can be allocated anywhere in the image file. 1188 * 1189 * *host_offset is updated to contain the offset into the image file at which 1190 * the first allocated cluster starts. 1191 * 1192 * Return 0 on success and -errno in error cases. -EAGAIN means that the 1193 * function has been waiting for another request and the allocation must be 1194 * restarted, but the whole request should not be failed. 
1195 */ 1196 static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset, 1197 uint64_t *host_offset, uint64_t *nb_clusters) 1198 { 1199 BDRVQcow2State *s = bs->opaque; 1200 1201 trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset, 1202 *host_offset, *nb_clusters); 1203 1204 /* Allocate new clusters */ 1205 trace_qcow2_cluster_alloc_phys(qemu_coroutine_self()); 1206 if (*host_offset == 0) { 1207 int64_t cluster_offset = 1208 qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size); 1209 if (cluster_offset < 0) { 1210 return cluster_offset; 1211 } 1212 *host_offset = cluster_offset; 1213 return 0; 1214 } else { 1215 int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters); 1216 if (ret < 0) { 1217 return ret; 1218 } 1219 *nb_clusters = ret; 1220 return 0; 1221 } 1222 } 1223 1224 /* 1225 * Allocates new clusters for an area that either is yet unallocated or needs a 1226 * copy on write. If *host_offset is non-zero, clusters are only allocated if 1227 * the new allocation can match the specified host offset. 1228 * 1229 * Note that guest_offset may not be cluster aligned. In this case, the 1230 * returned *host_offset points to exact byte referenced by guest_offset and 1231 * therefore isn't cluster aligned as well. 1232 * 1233 * Returns: 1234 * 0: if no clusters could be allocated. *bytes is set to 0, 1235 * *host_offset is left unchanged. 1236 * 1237 * 1: if new clusters were allocated. *bytes may be decreased if the 1238 * new allocation doesn't cover all of the requested area. 1239 * *host_offset is updated to contain the host offset of the first 1240 * newly allocated cluster. 1241 * 1242 * -errno: in error cases 1243 */ 1244 static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset, 1245 uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m) 1246 { 1247 BDRVQcow2State *s = bs->opaque; 1248 int l2_index; 1249 uint64_t *l2_table; 1250 uint64_t entry; 1251 uint64_t nb_clusters; 1252 int ret; 1253 bool keep_old_clusters = false; 1254 1255 uint64_t alloc_cluster_offset = 0; 1256 1257 trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset, 1258 *bytes); 1259 assert(*bytes > 0); 1260 1261 /* 1262 * Calculate the number of clusters to look for. We stop at L2 table 1263 * boundaries to keep things simple. 1264 */ 1265 nb_clusters = 1266 size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes); 1267 1268 l2_index = offset_to_l2_index(s, guest_offset); 1269 nb_clusters = MIN(nb_clusters, s->l2_size - l2_index); 1270 assert(nb_clusters <= INT_MAX); 1271 1272 /* Find L2 entry for the first involved cluster */ 1273 ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index); 1274 if (ret < 0) { 1275 return ret; 1276 } 1277 1278 entry = be64_to_cpu(l2_table[l2_index]); 1279 1280 /* For the moment, overwrite compressed clusters one by one */ 1281 if (entry & QCOW_OFLAG_COMPRESSED) { 1282 nb_clusters = 1; 1283 } else { 1284 nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index); 1285 } 1286 1287 /* This function is only called when there were no non-COW clusters, so if 1288 * we can't find any unallocated or COW clusters either, something is 1289 * wrong with our code. 
     */
    assert(nb_clusters > 0);

    if (qcow2_get_cluster_type(entry) == QCOW2_CLUSTER_ZERO_ALLOC &&
        (entry & QCOW_OFLAG_COPIED) &&
        (!*host_offset ||
         start_of_cluster(s, *host_offset) == (entry & L2E_OFFSET_MASK)))
    {
        /* Try to reuse preallocated zero clusters; contiguous normal clusters
         * would be fine, too, but count_cow_clusters() above has limited
         * nb_clusters already to a range of COW clusters */
        int preallocated_nb_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], QCOW_OFLAG_COPIED);
        assert(preallocated_nb_clusters > 0);

        nb_clusters = preallocated_nb_clusters;
        alloc_cluster_offset = entry & L2E_OFFSET_MASK;

        /* We want to reuse these clusters, so qcow2_alloc_cluster_link_l2()
         * should not free them. */
        keep_old_clusters = true;
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    if (!alloc_cluster_offset) {
        /* Allocate, if necessary at a given offset in the image file */
        alloc_cluster_offset = start_of_cluster(s, *host_offset);
        ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                      &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        /* Can't extend contiguous allocation */
        if (nb_clusters == 0) {
            *bytes = 0;
            return 0;
        }

        /* !*host_offset would overwrite the image header and is reserved for
         * "no host offset preferred". If 0 was a valid host offset, it'd
         * trigger the following overlap check; do that now to avoid having an
         * invalid value in *host_offset. */
        if (!alloc_cluster_offset) {
            ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
                                                nb_clusters * s->cluster_size);
            assert(ret < 0);
            goto fail;
        }
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the (possibly shortened)
     * write request.
     *
     * avail_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the last newly allocated cluster.
1351 * 1352 * nb_bytes: The number of bytes from the start of the first 1353 * newly allocated cluster to the end of the area that the write 1354 * request actually writes to (excluding COW at the end) 1355 */ 1356 uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset); 1357 int avail_bytes = MIN(INT_MAX, nb_clusters << s->cluster_bits); 1358 int nb_bytes = MIN(requested_bytes, avail_bytes); 1359 QCowL2Meta *old_m = *m; 1360 1361 *m = g_malloc0(sizeof(**m)); 1362 1363 **m = (QCowL2Meta) { 1364 .next = old_m, 1365 1366 .alloc_offset = alloc_cluster_offset, 1367 .offset = start_of_cluster(s, guest_offset), 1368 .nb_clusters = nb_clusters, 1369 1370 .keep_old_clusters = keep_old_clusters, 1371 1372 .cow_start = { 1373 .offset = 0, 1374 .nb_bytes = offset_into_cluster(s, guest_offset), 1375 }, 1376 .cow_end = { 1377 .offset = nb_bytes, 1378 .nb_bytes = avail_bytes - nb_bytes, 1379 }, 1380 }; 1381 qemu_co_queue_init(&(*m)->dependent_requests); 1382 QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight); 1383 1384 *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset); 1385 *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset)); 1386 assert(*bytes != 0); 1387 1388 return 1; 1389 1390 fail: 1391 if (*m && (*m)->nb_clusters > 0) { 1392 QLIST_REMOVE(*m, next_in_flight); 1393 } 1394 return ret; 1395 } 1396 1397 /* 1398 * alloc_cluster_offset 1399 * 1400 * For a given offset on the virtual disk, find the cluster offset in qcow2 1401 * file. If the offset is not found, allocate a new cluster. 1402 * 1403 * If the cluster was already allocated, m->nb_clusters is set to 0 and 1404 * other fields in m are meaningless. 1405 * 1406 * If the cluster is newly allocated, m->nb_clusters is set to the number of 1407 * contiguous clusters that have been allocated. In this case, the other 1408 * fields of m are valid and contain information about the first allocated 1409 * cluster. 1410 * 1411 * If the request conflicts with another write request in flight, the coroutine 1412 * is queued and will be reentered when the dependency has completed. 1413 * 1414 * Return 0 on success and -errno in error cases 1415 */ 1416 int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset, 1417 unsigned int *bytes, uint64_t *host_offset, 1418 QCowL2Meta **m) 1419 { 1420 BDRVQcow2State *s = bs->opaque; 1421 uint64_t start, remaining; 1422 uint64_t cluster_offset; 1423 uint64_t cur_bytes; 1424 int ret; 1425 1426 trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes); 1427 1428 again: 1429 start = offset; 1430 remaining = *bytes; 1431 cluster_offset = 0; 1432 *host_offset = 0; 1433 cur_bytes = 0; 1434 *m = NULL; 1435 1436 while (true) { 1437 1438 if (!*host_offset) { 1439 *host_offset = start_of_cluster(s, cluster_offset); 1440 } 1441 1442 assert(remaining >= cur_bytes); 1443 1444 start += cur_bytes; 1445 remaining -= cur_bytes; 1446 cluster_offset += cur_bytes; 1447 1448 if (remaining == 0) { 1449 break; 1450 } 1451 1452 cur_bytes = remaining; 1453 1454 /* 1455 * Now start gathering as many contiguous clusters as possible: 1456 * 1457 * 1. Check for overlaps with in-flight allocations 1458 * 1459 * a) Overlap not in the first cluster -> shorten this request and 1460 * let the caller handle the rest in its next loop iteration. 1461 * 1462 * b) Real overlaps of two requests. 
         *       for contiguous clusters (the situation could have changed
         *       while we were sleeping)
         *
         *    c) TODO: Request starts in the same cluster as the in-flight
         *       allocation ends. Shorten the COW of the in-flight allocation,
         *       set cluster_offset to write to the same cluster and set up
         *       the right synchronisation between the in-flight request and
         *       the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *bytes -= remaining;
    assert(*bytes > 0);
    assert(*host_offset != 0);

    return 0;
}

static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK) {
        return -1;
    }
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data,
                        nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}

/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
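 *
 * On error, a negative errno value is returned.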
1580 */ 1581 static int discard_single_l2(BlockDriverState *bs, uint64_t offset, 1582 uint64_t nb_clusters, enum qcow2_discard_type type, 1583 bool full_discard) 1584 { 1585 BDRVQcow2State *s = bs->opaque; 1586 uint64_t *l2_table; 1587 int l2_index; 1588 int ret; 1589 int i; 1590 1591 ret = get_cluster_table(bs, offset, &l2_table, &l2_index); 1592 if (ret < 0) { 1593 return ret; 1594 } 1595 1596 /* Limit nb_clusters to one L2 table */ 1597 nb_clusters = MIN(nb_clusters, s->l2_size - l2_index); 1598 assert(nb_clusters <= INT_MAX); 1599 1600 for (i = 0; i < nb_clusters; i++) { 1601 uint64_t old_l2_entry; 1602 1603 old_l2_entry = be64_to_cpu(l2_table[l2_index + i]); 1604 1605 /* 1606 * If full_discard is false, make sure that a discarded area reads back 1607 * as zeroes for v3 images (we cannot do it for v2 without actually 1608 * writing a zero-filled buffer). We can skip the operation if the 1609 * cluster is already marked as zero, or if it's unallocated and we 1610 * don't have a backing file. 1611 * 1612 * TODO We might want to use bdrv_get_block_status(bs) here, but we're 1613 * holding s->lock, so that doesn't work today. 1614 * 1615 * If full_discard is true, the sector should not read back as zeroes, 1616 * but rather fall through to the backing file. 1617 */ 1618 switch (qcow2_get_cluster_type(old_l2_entry)) { 1619 case QCOW2_CLUSTER_UNALLOCATED: 1620 if (full_discard || !bs->backing) { 1621 continue; 1622 } 1623 break; 1624 1625 case QCOW2_CLUSTER_ZERO_PLAIN: 1626 if (!full_discard) { 1627 continue; 1628 } 1629 break; 1630 1631 case QCOW2_CLUSTER_ZERO_ALLOC: 1632 case QCOW2_CLUSTER_NORMAL: 1633 case QCOW2_CLUSTER_COMPRESSED: 1634 break; 1635 1636 default: 1637 abort(); 1638 } 1639 1640 /* First remove L2 entries */ 1641 qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table); 1642 if (!full_discard && s->qcow_version >= 3) { 1643 l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO); 1644 } else { 1645 l2_table[l2_index + i] = cpu_to_be64(0); 1646 } 1647 1648 /* Then decrease the refcount */ 1649 qcow2_free_any_clusters(bs, old_l2_entry, 1, type); 1650 } 1651 1652 qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table); 1653 1654 return nb_clusters; 1655 } 1656 1657 int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset, 1658 uint64_t bytes, enum qcow2_discard_type type, 1659 bool full_discard) 1660 { 1661 BDRVQcow2State *s = bs->opaque; 1662 uint64_t end_offset = offset + bytes; 1663 uint64_t nb_clusters; 1664 int64_t cleared; 1665 int ret; 1666 1667 /* Caller must pass aligned values, except at image end */ 1668 assert(QEMU_IS_ALIGNED(offset, s->cluster_size)); 1669 assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) || 1670 end_offset == bs->total_sectors << BDRV_SECTOR_BITS); 1671 1672 nb_clusters = size_to_clusters(s, bytes); 1673 1674 s->cache_discards = true; 1675 1676 /* Each L2 table is handled by its own loop iteration */ 1677 while (nb_clusters > 0) { 1678 cleared = discard_single_l2(bs, offset, nb_clusters, type, 1679 full_discard); 1680 if (cleared < 0) { 1681 ret = cleared; 1682 goto fail; 1683 } 1684 1685 nb_clusters -= cleared; 1686 offset += (cleared * s->cluster_size); 1687 } 1688 1689 ret = 0; 1690 fail: 1691 s->cache_discards = false; 1692 qcow2_process_discards(bs, ret); 1693 1694 return ret; 1695 } 1696 1697 /* 1698 * This zeroes as many clusters of nb_clusters as possible at once (i.e. 1699 * all clusters in the same L2 table) and returns the number of zeroed 1700 * clusters. 
1701 */ 1702 static int zero_single_l2(BlockDriverState *bs, uint64_t offset, 1703 uint64_t nb_clusters, int flags) 1704 { 1705 BDRVQcow2State *s = bs->opaque; 1706 uint64_t *l2_table; 1707 int l2_index; 1708 int ret; 1709 int i; 1710 bool unmap = !!(flags & BDRV_REQ_MAY_UNMAP); 1711 1712 ret = get_cluster_table(bs, offset, &l2_table, &l2_index); 1713 if (ret < 0) { 1714 return ret; 1715 } 1716 1717 /* Limit nb_clusters to one L2 table */ 1718 nb_clusters = MIN(nb_clusters, s->l2_size - l2_index); 1719 assert(nb_clusters <= INT_MAX); 1720 1721 for (i = 0; i < nb_clusters; i++) { 1722 uint64_t old_offset; 1723 QCow2ClusterType cluster_type; 1724 1725 old_offset = be64_to_cpu(l2_table[l2_index + i]); 1726 1727 /* 1728 * Minimize L2 changes if the cluster already reads back as 1729 * zeroes with correct allocation. 1730 */ 1731 cluster_type = qcow2_get_cluster_type(old_offset); 1732 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN || 1733 (cluster_type == QCOW2_CLUSTER_ZERO_ALLOC && !unmap)) { 1734 continue; 1735 } 1736 1737 qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table); 1738 if (cluster_type == QCOW2_CLUSTER_COMPRESSED || unmap) { 1739 l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO); 1740 qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST); 1741 } else { 1742 l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO); 1743 } 1744 } 1745 1746 qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table); 1747 1748 return nb_clusters; 1749 } 1750 1751 int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset, 1752 uint64_t bytes, int flags) 1753 { 1754 BDRVQcow2State *s = bs->opaque; 1755 uint64_t end_offset = offset + bytes; 1756 uint64_t nb_clusters; 1757 int64_t cleared; 1758 int ret; 1759 1760 /* Caller must pass aligned values, except at image end */ 1761 assert(QEMU_IS_ALIGNED(offset, s->cluster_size)); 1762 assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) || 1763 end_offset == bs->total_sectors << BDRV_SECTOR_BITS); 1764 1765 /* The zero flag is only supported by version 3 and newer */ 1766 if (s->qcow_version < 3) { 1767 return -ENOTSUP; 1768 } 1769 1770 /* Each L2 table is handled by its own loop iteration */ 1771 nb_clusters = size_to_clusters(s, bytes); 1772 1773 s->cache_discards = true; 1774 1775 while (nb_clusters > 0) { 1776 cleared = zero_single_l2(bs, offset, nb_clusters, flags); 1777 if (cleared < 0) { 1778 ret = cleared; 1779 goto fail; 1780 } 1781 1782 nb_clusters -= cleared; 1783 offset += (cleared * s->cluster_size); 1784 } 1785 1786 ret = 0; 1787 fail: 1788 s->cache_discards = false; 1789 qcow2_process_discards(bs, ret); 1790 1791 return ret; 1792 } 1793 1794 /* 1795 * Expands all zero clusters in a specific L1 table (or deallocates them, for 1796 * non-backed non-pre-allocated zero clusters). 1797 * 1798 * l1_entries and *visited_l1_entries are used to keep track of progress for 1799 * status_cb(). l1_entries contains the total number of L1 entries and 1800 * *visited_l1_entries counts all visited L1 entries. 
1801 */ 1802 static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table, 1803 int l1_size, int64_t *visited_l1_entries, 1804 int64_t l1_entries, 1805 BlockDriverAmendStatusCB *status_cb, 1806 void *cb_opaque) 1807 { 1808 BDRVQcow2State *s = bs->opaque; 1809 bool is_active_l1 = (l1_table == s->l1_table); 1810 uint64_t *l2_table = NULL; 1811 int ret; 1812 int i, j; 1813 1814 if (!is_active_l1) { 1815 /* inactive L2 tables require a buffer to be stored in when loading 1816 * them from disk */ 1817 l2_table = qemu_try_blockalign(bs->file->bs, s->cluster_size); 1818 if (l2_table == NULL) { 1819 return -ENOMEM; 1820 } 1821 } 1822 1823 for (i = 0; i < l1_size; i++) { 1824 uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK; 1825 bool l2_dirty = false; 1826 uint64_t l2_refcount; 1827 1828 if (!l2_offset) { 1829 /* unallocated */ 1830 (*visited_l1_entries)++; 1831 if (status_cb) { 1832 status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque); 1833 } 1834 continue; 1835 } 1836 1837 if (offset_into_cluster(s, l2_offset)) { 1838 qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" 1839 PRIx64 " unaligned (L1 index: %#x)", 1840 l2_offset, i); 1841 ret = -EIO; 1842 goto fail; 1843 } 1844 1845 if (is_active_l1) { 1846 /* get active L2 tables from cache */ 1847 ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, 1848 (void **)&l2_table); 1849 } else { 1850 /* load inactive L2 tables from disk */ 1851 ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE, 1852 (void *)l2_table, s->cluster_sectors); 1853 } 1854 if (ret < 0) { 1855 goto fail; 1856 } 1857 1858 ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits, 1859 &l2_refcount); 1860 if (ret < 0) { 1861 goto fail; 1862 } 1863 1864 for (j = 0; j < s->l2_size; j++) { 1865 uint64_t l2_entry = be64_to_cpu(l2_table[j]); 1866 int64_t offset = l2_entry & L2E_OFFSET_MASK; 1867 QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry); 1868 1869 if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN && 1870 cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) { 1871 continue; 1872 } 1873 1874 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) { 1875 if (!bs->backing) { 1876 /* not backed; therefore we can simply deallocate the 1877 * cluster */ 1878 l2_table[j] = 0; 1879 l2_dirty = true; 1880 continue; 1881 } 1882 1883 offset = qcow2_alloc_clusters(bs, s->cluster_size); 1884 if (offset < 0) { 1885 ret = offset; 1886 goto fail; 1887 } 1888 1889 if (l2_refcount > 1) { 1890 /* For shared L2 tables, set the refcount accordingly (it is 1891 * already 1 and needs to be l2_refcount) */ 1892 ret = qcow2_update_cluster_refcount(bs, 1893 offset >> s->cluster_bits, 1894 refcount_diff(1, l2_refcount), false, 1895 QCOW2_DISCARD_OTHER); 1896 if (ret < 0) { 1897 qcow2_free_clusters(bs, offset, s->cluster_size, 1898 QCOW2_DISCARD_OTHER); 1899 goto fail; 1900 } 1901 } 1902 } 1903 1904 if (offset_into_cluster(s, offset)) { 1905 qcow2_signal_corruption(bs, true, -1, -1, 1906 "Cluster allocation offset " 1907 "%#" PRIx64 " unaligned (L2 offset: %#" 1908 PRIx64 ", L2 index: %#x)", offset, 1909 l2_offset, j); 1910 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) { 1911 qcow2_free_clusters(bs, offset, s->cluster_size, 1912 QCOW2_DISCARD_ALWAYS); 1913 } 1914 ret = -EIO; 1915 goto fail; 1916 } 1917 1918 ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size); 1919 if (ret < 0) { 1920 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) { 1921 qcow2_free_clusters(bs, offset, s->cluster_size, 1922 QCOW2_DISCARD_ALWAYS); 1923 } 1924 goto fail; 1925 } 1926 1927 ret = 
            if (ret < 0) {
                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            if (l2_refcount == 1) {
                l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
            } else {
                l2_table[j] = cpu_to_be64(offset);
            }
            l2_dirty = true;
        }

        if (is_active_l1) {
            if (l2_dirty) {
                qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
                qcow2_cache_depends_on_flush(s->l2_table_cache);
            }
            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        } else {
            if (l2_dirty) {
                ret = qcow2_pre_write_overlap_check(bs,
                        QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
                        s->cluster_size);
                if (ret < 0) {
                    goto fail;
                }

                ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                                 (void *)l2_table, s->cluster_sectors);
                if (ret < 0) {
                    goto fail;
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
        }
    }

    ret = 0;

fail:
    if (l2_table) {
        if (!is_active_l1) {
            qemu_vfree(l2_table);
        } else {
            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        }
    }
    return ret;
}

/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs,
                               BlockDriverAmendStatusCB *status_cb,
                               void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table = NULL;
    int64_t l1_entries = 0, visited_l1_entries = 0;
    int ret;
    int i, j;

    if (status_cb) {
        l1_entries = s->l1_size;
        for (i = 0; i < s->nb_snapshots; i++) {
            l1_entries += s->snapshots[i].l1_size;
        }
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &visited_l1_entries, l1_entries,
                                     status_cb, cb_opaque);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us.
     */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_sectors = DIV_ROUND_UP(s->snapshots[i].l1_size *
                                      sizeof(uint64_t), BDRV_SECTOR_SIZE);

        l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);

        ret = bdrv_read(bs->file,
                        s->snapshots[i].l1_table_offset / BDRV_SECTOR_SIZE,
                        (void *)l1_table, l1_sectors);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &visited_l1_entries, l1_entries,
                                         status_cb, cb_opaque);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(l1_table);
    return ret;
}