/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "qemu/range.h"

static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size);
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
                            int64_t offset, int64_t length,
                            int addend, enum qcow2_discard_type type);


/*********************************************************/
/* refcount handling */

int qcow2_refcount_init(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int refcount_table_size2, i;
    int ret;

    assert(s->refcount_table_size <= INT_MAX / sizeof(uint64_t));
    refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
    s->refcount_table = g_try_malloc(refcount_table_size2);

    if (s->refcount_table_size > 0) {
        if (s->refcount_table == NULL) {
            ret = -ENOMEM;
            goto fail;
        }
        BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_LOAD);
        ret = bdrv_pread(bs->file, s->refcount_table_offset,
                         s->refcount_table, refcount_table_size2);
        if (ret < 0) {
            goto fail;
        }
        for (i = 0; i < s->refcount_table_size; i++) {
            be64_to_cpus(&s->refcount_table[i]);
        }
    }
    return 0;
fail:
    return ret;
}

void qcow2_refcount_close(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    g_free(s->refcount_table);
}


static int load_refcount_block(BlockDriverState *bs,
                               int64_t refcount_block_offset,
                               void **refcount_block)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_LOAD);
    ret = qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset,
                          refcount_block);

    return ret;
}
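/*
 * Worked example of the refcount indexing math used below (illustrative,
 * assuming the default 64 KiB clusters with 16-bit refcount entries): a
 * refcount block then holds 65536 / 2 = 32768 entries, so
 * refcount_block_bits = 15. For cluster_index 100000:
 *
 *     refcount_table_index = 100000 >> 15          = 3
 *     block_index          = 100000 & (32768 - 1)  = 1696
 *
 * i.e. entry 1696 of the refcount block pointed to by reftable entry 3.
 */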
/*
 * Returns the refcount of the cluster given by its index. Any non-negative
 * return value is the refcount of the cluster, negative values are -errno
 * and indicate an error.
 */
int qcow2_get_refcount(BlockDriverState *bs, int64_t cluster_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t refcount_table_index, block_index;
    int64_t refcount_block_offset;
    int ret;
    uint16_t *refcount_block;
    uint16_t refcount;

    refcount_table_index = cluster_index >> s->refcount_block_bits;
    if (refcount_table_index >= s->refcount_table_size) {
        return 0;
    }
    refcount_block_offset =
        s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK;
    if (!refcount_block_offset) {
        return 0;
    }

    if (offset_into_cluster(s, refcount_block_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#" PRIx64
                                " unaligned (reftable index: %#" PRIx64 ")",
                                refcount_block_offset, refcount_table_index);
        return -EIO;
    }

    ret = qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset,
                          (void**) &refcount_block);
    if (ret < 0) {
        return ret;
    }

    block_index = cluster_index & (s->refcount_block_size - 1);
    refcount = be16_to_cpu(refcount_block[block_index]);

    ret = qcow2_cache_put(bs, s->refcount_block_cache,
                          (void**) &refcount_block);
    if (ret < 0) {
        return ret;
    }

    return refcount;
}

/*
 * Rounds the refcount table size up to avoid growing the table for each single
 * refcount block that is allocated.
 */
static unsigned int next_refcount_table_size(BDRVQcowState *s,
                                             unsigned int min_size)
{
    unsigned int min_clusters = (min_size >> (s->cluster_bits - 3)) + 1;
    unsigned int refcount_table_clusters =
        MAX(1, s->refcount_table_size >> (s->cluster_bits - 3));

    while (min_clusters > refcount_table_clusters) {
        refcount_table_clusters = (refcount_table_clusters * 3 + 1) / 2;
    }

    return refcount_table_clusters << (s->cluster_bits - 3);
}


/* Checks if two offsets are described by the same refcount block */
static int in_same_refcount_block(BDRVQcowState *s, uint64_t offset_a,
                                  uint64_t offset_b)
{
    uint64_t block_a = offset_a >> (s->cluster_bits + s->refcount_block_bits);
    uint64_t block_b = offset_b >> (s->cluster_bits + s->refcount_block_bits);

    return (block_a == block_b);
}

/*
 * Loads a refcount block. If it doesn't exist yet, it is allocated first
 * (including growing the refcount table if needed).
 *
 * Returns 0 on success or -errno in error case
 */
static int alloc_refcount_block(BlockDriverState *bs,
                                int64_t cluster_index,
                                uint16_t **refcount_block)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int refcount_table_index;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC);

    /* Find the refcount block for the given cluster */
    refcount_table_index = cluster_index >> s->refcount_block_bits;

    if (refcount_table_index < s->refcount_table_size) {

        uint64_t refcount_block_offset =
            s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK;

        /* If it's already there, we're done */
        if (refcount_block_offset) {
            if (offset_into_cluster(s, refcount_block_offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#"
                                        PRIx64 " unaligned (reftable index: "
                                        "%#x)", refcount_block_offset,
                                        refcount_table_index);
                return -EIO;
            }

            return load_refcount_block(bs, refcount_block_offset,
                                       (void**) refcount_block);
        }
    }
    /*
     * If we came here, we need to allocate something. Something is at least
     * a cluster for the new refcount block. It may also include a new refcount
     * table if the old refcount table is too small.
     *
     * Note that allocating clusters here needs some special care:
     *
     * - We can't use the normal qcow2_alloc_clusters(), it would try to
     *   increase the refcount and very likely we would end up with an endless
     *   recursion. Instead we must place the refcount blocks in a way that
     *   they can describe themselves.
     *
     * - We need to consider that at this point we are inside update_refcount()
     *   and potentially doing an initial refcount increase. This means that
     *   some clusters have already been allocated by the caller, but their
     *   refcount isn't accurate yet. If we allocate clusters for metadata, we
     *   need to return -EAGAIN to signal the caller that it needs to restart
     *   the search for free clusters.
     *
     * - alloc_clusters_noref and qcow2_free_clusters may load a different
     *   refcount block into the cache
     */

    *refcount_block = NULL;

    /* We write to the refcount table, so we might depend on L2 tables */
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        return ret;
    }

    /* Allocate the refcount block itself and mark it as used */
    int64_t new_block = alloc_clusters_noref(bs, s->cluster_size);
    if (new_block < 0) {
        return new_block;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "qcow2: Allocate refcount block %d for %" PRIx64
            " at %" PRIx64 "\n",
            refcount_table_index, cluster_index << s->cluster_bits, new_block);
#endif

    if (in_same_refcount_block(s, new_block, cluster_index << s->cluster_bits)) {
        /* Zero the new refcount block before updating it */
        ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
                                    (void**) refcount_block);
        if (ret < 0) {
            goto fail_block;
        }

        memset(*refcount_block, 0, s->cluster_size);

        /* The block describes itself, need to update the cache */
        int block_index = (new_block >> s->cluster_bits) &
            (s->refcount_block_size - 1);
        (*refcount_block)[block_index] = cpu_to_be16(1);
    } else {
        /* Described somewhere else. This can recurse at most twice before we
         * arrive at a block that describes itself. */
        ret = update_refcount(bs, new_block, s->cluster_size, 1,
                              QCOW2_DISCARD_NEVER);
        if (ret < 0) {
            goto fail_block;
        }

        ret = qcow2_cache_flush(bs, s->refcount_block_cache);
        if (ret < 0) {
            goto fail_block;
        }

        /* Initialize the new refcount block only after updating its refcount,
         * update_refcount uses the refcount cache itself */
        ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
                                    (void**) refcount_block);
        if (ret < 0) {
            goto fail_block;
        }

        memset(*refcount_block, 0, s->cluster_size);
    }

    /* Now the new refcount block needs to be written to disk */
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE);
    qcow2_cache_entry_mark_dirty(s->refcount_block_cache, *refcount_block);
    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail_block;
    }

    /* If the refcount table is big enough, just hook the block up there */
    if (refcount_table_index < s->refcount_table_size) {
        uint64_t data64 = cpu_to_be64(new_block);
        BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP);
        ret = bdrv_pwrite_sync(bs->file,
            s->refcount_table_offset + refcount_table_index * sizeof(uint64_t),
            &data64, sizeof(data64));
        if (ret < 0) {
            goto fail_block;
        }

        s->refcount_table[refcount_table_index] = new_block;

        /* The new refcount block may be where the caller intended to put its
         * data, so let it restart the search. */
        return -EAGAIN;
    }

    ret = qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block);
    if (ret < 0) {
        goto fail_block;
    }

    /*
     * If we come here, we need to grow the refcount table. Again, a new
     * refcount table needs some space and we can't simply allocate to avoid
     * endless recursion.
     *
     * Therefore let's grab new refcount blocks at the end of the image, which
     * will describe themselves and the new refcount table. This way we can
     * reference them only in the new table and do the switch to the new
     * refcount table at once without producing an inconsistent state in
     * between.
     */
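    /*
     * Illustrative geometry (assuming the default 64 KiB clusters with
     * 16-bit refcount entries): one refblock covers 32768 clusters, i.e.
     * 2 GiB of image data, and one reftable cluster holds 8192 refblock
     * pointers, so a single-cluster reftable already describes up to
     * 8192 * 2 GiB = 16 TiB before it needs to grow.
     */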
    BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_GROW);

    /* Calculate the number of refcount blocks needed so far */
    uint64_t blocks_used = DIV_ROUND_UP(cluster_index, s->refcount_block_size);

    if (blocks_used > QCOW_MAX_REFTABLE_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

    /* And now we need at least one block more for the new metadata */
    uint64_t table_size = next_refcount_table_size(s, blocks_used + 1);
    uint64_t last_table_size;
    uint64_t blocks_clusters;
    do {
        uint64_t table_clusters =
            size_to_clusters(s, table_size * sizeof(uint64_t));
        blocks_clusters = 1 +
            ((table_clusters + s->refcount_block_size - 1)
             / s->refcount_block_size);
        uint64_t meta_clusters = table_clusters + blocks_clusters;

        last_table_size = table_size;
        table_size = next_refcount_table_size(s, blocks_used +
            ((meta_clusters + s->refcount_block_size - 1)
             / s->refcount_block_size));

    } while (last_table_size != table_size);

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "qcow2: Grow refcount table %" PRId32 " => %" PRId64 "\n",
            s->refcount_table_size, table_size);
#endif

    /* Create the new refcount table and blocks */
    uint64_t meta_offset = (blocks_used * s->refcount_block_size) *
        s->cluster_size;
    uint64_t table_offset = meta_offset + blocks_clusters * s->cluster_size;
    uint64_t *new_table = g_try_new0(uint64_t, table_size);
    uint16_t *new_blocks = g_try_malloc0(blocks_clusters * s->cluster_size);

    assert(table_size > 0 && blocks_clusters > 0);
    if (new_table == NULL || new_blocks == NULL) {
        ret = -ENOMEM;
        goto fail_table;
    }

    /* Fill the new refcount table */
    memcpy(new_table, s->refcount_table,
           s->refcount_table_size * sizeof(uint64_t));
    new_table[refcount_table_index] = new_block;

    int i;
    for (i = 0; i < blocks_clusters; i++) {
        new_table[blocks_used + i] = meta_offset + (i * s->cluster_size);
    }

    /* Fill the refcount blocks */
    uint64_t table_clusters = size_to_clusters(s, table_size * sizeof(uint64_t));
    int block = 0;
    for (i = 0; i < table_clusters + blocks_clusters; i++) {
        new_blocks[block++] = cpu_to_be16(1);
    }

    /* Write refcount blocks to disk */
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS);
    ret = bdrv_pwrite_sync(bs->file, meta_offset, new_blocks,
                           blocks_clusters * s->cluster_size);
    g_free(new_blocks);
    new_blocks = NULL;
    if (ret < 0) {
        goto fail_table;
    }

    /* Write refcount table to disk */
    for (i = 0; i < table_size; i++) {
        cpu_to_be64s(&new_table[i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE);
    ret = bdrv_pwrite_sync(bs->file, table_offset, new_table,
                           table_size * sizeof(uint64_t));
    if (ret < 0) {
        goto fail_table;
    }

    for (i = 0; i < table_size; i++) {
        be64_to_cpus(&new_table[i]);
    }

    /* Hook up the new refcount table in the qcow2 header */
    uint8_t data[12];
    cpu_to_be64w((uint64_t*)data, table_offset);
    cpu_to_be32w((uint32_t*)(data + 8), table_clusters);
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, refcount_table_offset),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail_table;
    }

    /* And switch it in memory */
    uint64_t old_table_offset = s->refcount_table_offset;
    uint64_t old_table_size = s->refcount_table_size;

    g_free(s->refcount_table);
    s->refcount_table = new_table;
    s->refcount_table_size = table_size;
    s->refcount_table_offset = table_offset;

    /* Free old table. */
    qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);

    ret = load_refcount_block(bs, new_block, (void**) refcount_block);
    if (ret < 0) {
        return ret;
    }

    /* If we were trying to do the initial refcount update for some cluster
     * allocation, we might have used the same clusters to store newly
     * allocated metadata. Make the caller search some new space. */
    return -EAGAIN;

fail_table:
    g_free(new_blocks);
    g_free(new_table);
fail_block:
    if (*refcount_block != NULL) {
        qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block);
    }
    return ret;
}

void qcow2_process_discards(BlockDriverState *bs, int ret)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2DiscardRegion *d, *next;

    QTAILQ_FOREACH_SAFE(d, &s->discards, next, next) {
        QTAILQ_REMOVE(&s->discards, d, next);

        /* Discard is optional, ignore the return value */
        if (ret >= 0) {
            bdrv_discard(bs->file,
                         d->offset >> BDRV_SECTOR_BITS,
                         d->bytes >> BDRV_SECTOR_BITS);
        }

        g_free(d);
    }
}

static void update_refcount_discard(BlockDriverState *bs,
                                    uint64_t offset, uint64_t length)
{
    BDRVQcowState *s = bs->opaque;
    Qcow2DiscardRegion *d, *p, *next;

    QTAILQ_FOREACH(d, &s->discards, next) {
        uint64_t new_start = MIN(offset, d->offset);
        uint64_t new_end = MAX(offset + length, d->offset + d->bytes);

        if (new_end - new_start <= length + d->bytes) {
            /* There can't be any overlap, areas ending up here have no
             * references any more and therefore shouldn't get freed another
             * time. */
            assert(d->bytes + length == new_end - new_start);
            d->offset = new_start;
            d->bytes = new_end - new_start;
            goto found;
        }
    }

    d = g_malloc(sizeof(*d));
    *d = (Qcow2DiscardRegion) {
        .bs = bs,
        .offset = offset,
        .bytes = length,
    };
    QTAILQ_INSERT_TAIL(&s->discards, d, next);

found:
    /* Merge discard requests if they are adjacent now */
    QTAILQ_FOREACH_SAFE(p, &s->discards, next, next) {
        if (p == d
            || p->offset > d->offset + d->bytes
            || d->offset > p->offset + p->bytes)
        {
            continue;
        }

        /* Still no overlap possible */
        assert(p->offset == d->offset + d->bytes
               || d->offset == p->offset + p->bytes);

        QTAILQ_REMOVE(&s->discards, p, next);
        d->offset = MIN(d->offset, p->offset);
        d->bytes += p->bytes;
        g_free(p);
    }
}
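/*
 * Worked example for the merging above (illustrative numbers): with a
 * pending region [0x10000, 0x20000) and a new discard of length 0x10000 at
 * offset 0x20000, new_start = 0x10000 and new_end = 0x30000, so the pending
 * region is extended in place to [0x10000, 0x30000); the second loop then
 * folds in any other region that has become adjacent to it.
 */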
/* XXX: cache several refcount block clusters ? */
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
    int64_t offset, int64_t length, int addend, enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;
    uint16_t *refcount_block = NULL;
    int64_t old_table_index = -1;
    int ret;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "update_refcount: offset=%" PRId64 " size=%" PRId64 " addend=%d\n",
            offset, length, addend);
#endif
    if (length < 0) {
        return -EINVAL;
    } else if (length == 0) {
        return 0;
    }

    if (addend < 0) {
        qcow2_cache_set_dependency(bs, s->refcount_block_cache,
            s->l2_table_cache);
    }

    start = start_of_cluster(s, offset);
    last = start_of_cluster(s, offset + length - 1);
    for (cluster_offset = start; cluster_offset <= last;
         cluster_offset += s->cluster_size)
    {
        int block_index, refcount;
        int64_t cluster_index = cluster_offset >> s->cluster_bits;
        int64_t table_index = cluster_index >> s->refcount_block_bits;

        /* Load the refcount block and allocate it if needed */
        if (table_index != old_table_index) {
            if (refcount_block) {
                ret = qcow2_cache_put(bs, s->refcount_block_cache,
                                      (void**) &refcount_block);
                if (ret < 0) {
                    goto fail;
                }
            }

            ret = alloc_refcount_block(bs, cluster_index, &refcount_block);
            if (ret < 0) {
                goto fail;
            }
        }
        old_table_index = table_index;

        qcow2_cache_entry_mark_dirty(s->refcount_block_cache, refcount_block);

        /* we can update the count and save it */
        block_index = cluster_index & (s->refcount_block_size - 1);

        refcount = be16_to_cpu(refcount_block[block_index]);
        refcount += addend;
        if (refcount < 0 || refcount > 0xffff) {
            ret = -EINVAL;
            goto fail;
        }
        if (refcount == 0 && cluster_index < s->free_cluster_index) {
            s->free_cluster_index = cluster_index;
        }
        refcount_block[block_index] = cpu_to_be16(refcount);

        if (refcount == 0 && s->discard_passthrough[type]) {
            update_refcount_discard(bs, cluster_offset, s->cluster_size);
        }
    }

    ret = 0;
fail:
    if (!s->cache_discards) {
        qcow2_process_discards(bs, ret);
    }

    /* Write last changed block to disk */
    if (refcount_block) {
        int wret;
        wret = qcow2_cache_put(bs, s->refcount_block_cache,
                               (void**) &refcount_block);
        if (wret < 0) {
            return ret < 0 ? ret : wret;
        }
    }

    /*
     * Try to undo any updates if an error is returned (This may succeed in
     * some cases like ENOSPC for allocating a new refcount block)
     */
    if (ret < 0) {
        int dummy;
        dummy = update_refcount(bs, offset, cluster_offset - offset, -addend,
                                QCOW2_DISCARD_NEVER);
        (void)dummy;
    }

    return ret;
}
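/*
 * Usage sketch for update_refcount() (illustrative): increasing the refcount
 * of one 64 KiB cluster at offset 0x50000 by 1 would be
 *
 *     ret = update_refcount(bs, 0x50000, 0x10000, 1, QCOW2_DISCARD_NEVER);
 *
 * On failure, the error path above attempts to undo the clusters that were
 * already updated (over [offset, cluster_offset)), though that undo may
 * itself fail.
 */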
/*
 * Increases or decreases the refcount of a given cluster.
 *
 * If the return value is non-negative, it is the new refcount of the cluster.
 * If it is negative, it is -errno and indicates an error.
 */
int qcow2_update_cluster_refcount(BlockDriverState *bs,
                                  int64_t cluster_index,
                                  int addend,
                                  enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = update_refcount(bs, cluster_index << s->cluster_bits, 1, addend,
                          type);
    if (ret < 0) {
        return ret;
    }

    return qcow2_get_refcount(bs, cluster_index);
}



/*********************************************************/
/* cluster allocation functions */



/* return < 0 if error */
static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t i, nb_clusters;
    int refcount;

    nb_clusters = size_to_clusters(s, size);
retry:
    for (i = 0; i < nb_clusters; i++) {
        uint64_t next_cluster_index = s->free_cluster_index++;
        refcount = qcow2_get_refcount(bs, next_cluster_index);

        if (refcount < 0) {
            return refcount;
        } else if (refcount != 0) {
            goto retry;
        }
    }

    /* Make sure that all offsets in the "allocated" range are representable
     * in an int64_t */
    if (s->free_cluster_index > 0 &&
        s->free_cluster_index - 1 > (INT64_MAX >> s->cluster_bits))
    {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "alloc_clusters: size=%" PRId64 " -> %" PRId64 "\n",
            size,
            (s->free_cluster_index - nb_clusters) << s->cluster_bits);
#endif
    return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
}

int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size)
{
    int64_t offset;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC);
    do {
        offset = alloc_clusters_noref(bs, size);
        if (offset < 0) {
            return offset;
        }

        ret = update_refcount(bs, offset, size, 1, QCOW2_DISCARD_NEVER);
    } while (ret == -EAGAIN);

    if (ret < 0) {
        return ret;
    }

    return offset;
}

int qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
                            int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t cluster_index;
    uint64_t i;
    int refcount, ret;

    assert(nb_clusters >= 0);
    if (nb_clusters == 0) {
        return 0;
    }

    do {
        /* Check how many clusters there are free */
        cluster_index = offset >> s->cluster_bits;
        for (i = 0; i < nb_clusters; i++) {
            refcount = qcow2_get_refcount(bs, cluster_index++);

            if (refcount < 0) {
                return refcount;
            } else if (refcount != 0) {
                break;
            }
        }

        /* And then allocate them */
        ret = update_refcount(bs, offset, i << s->cluster_bits, 1,
                              QCOW2_DISCARD_NEVER);
    } while (ret == -EAGAIN);

    if (ret < 0) {
        return ret;
    }

    return i;
}
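/*
 * Note on qcow2_alloc_clusters_at() above (illustrative): it returns how
 * many of the requested clusters were actually free and got allocated,
 * which may be fewer than nb_clusters. E.g. asking for 4 clusters at an
 * offset where the third one is already in use allocates and returns 2,
 * and the caller must handle the shorter run.
 */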
/* only used to allocate compressed sectors. We try to allocate
   contiguous sectors. size must be <= cluster_size */
int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size)
{
    BDRVQcowState *s = bs->opaque;
    int64_t offset, cluster_offset;
    int free_in_cluster;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_BYTES);
    assert(size > 0 && size <= s->cluster_size);
    if (s->free_byte_offset == 0) {
        offset = qcow2_alloc_clusters(bs, s->cluster_size);
        if (offset < 0) {
            return offset;
        }
        s->free_byte_offset = offset;
    }
redo:
    free_in_cluster = s->cluster_size -
        offset_into_cluster(s, s->free_byte_offset);
    if (size <= free_in_cluster) {
        /* enough space in current cluster */
        offset = s->free_byte_offset;
        s->free_byte_offset += size;
        free_in_cluster -= size;
        if (free_in_cluster == 0) {
            s->free_byte_offset = 0;
        }
        if (offset_into_cluster(s, offset) != 0) {
            qcow2_update_cluster_refcount(bs, offset >> s->cluster_bits, 1,
                                          QCOW2_DISCARD_NEVER);
        }
    } else {
        offset = qcow2_alloc_clusters(bs, s->cluster_size);
        if (offset < 0) {
            return offset;
        }
        cluster_offset = start_of_cluster(s, s->free_byte_offset);
        if ((cluster_offset + s->cluster_size) == offset) {
            /* we are lucky: contiguous data */
            offset = s->free_byte_offset;
            qcow2_update_cluster_refcount(bs, offset >> s->cluster_bits, 1,
                                          QCOW2_DISCARD_NEVER);
            s->free_byte_offset += size;
        } else {
            s->free_byte_offset = offset;
            goto redo;
        }
    }

    /* The cluster refcount was incremented, either by qcow2_alloc_clusters()
     * or explicitly by qcow2_update_cluster_refcount(). Refcount blocks must
     * be flushed before the caller's L2 table updates.
     */
    qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache);
    return offset;
}

void qcow2_free_clusters(BlockDriverState *bs,
                         int64_t offset, int64_t size,
                         enum qcow2_discard_type type)
{
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_FREE);
    ret = update_refcount(bs, offset, size, -1, type);
    if (ret < 0) {
        fprintf(stderr, "qcow2_free_clusters failed: %s\n", strerror(-ret));
        /* TODO Remember the clusters to free them later and avoid leaking */
    }
}
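/*
 * Worked example for qcow2_alloc_bytes() above (illustrative): two
 * consecutive compressed writes of 1 KiB into a fresh 64 KiB cluster at
 * host offset X return X and X + 0x400 within the same cluster; the second
 * (and every later) sub-cluster allocation bumps the cluster refcount, so
 * each compressed extent holds its own reference and can be freed
 * independently.
 */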
/*
 * Free a cluster using its L2 entry (handles clusters of all types, e.g.
 * normal cluster, compressed cluster, etc.)
 */
void qcow2_free_any_clusters(BlockDriverState *bs, uint64_t l2_entry,
                             int nb_clusters, enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;

    switch (qcow2_get_cluster_type(l2_entry)) {
    case QCOW2_CLUSTER_COMPRESSED:
        {
            int nb_csectors;
            nb_csectors = ((l2_entry >> s->csize_shift) &
                           s->csize_mask) + 1;
            qcow2_free_clusters(bs,
                (l2_entry & s->cluster_offset_mask) & ~511,
                nb_csectors * 512, type);
        }
        break;
    case QCOW2_CLUSTER_NORMAL:
    case QCOW2_CLUSTER_ZERO:
        if (l2_entry & L2E_OFFSET_MASK) {
            if (offset_into_cluster(s, l2_entry & L2E_OFFSET_MASK)) {
                qcow2_signal_corruption(bs, false, -1, -1,
                                        "Cannot free unaligned cluster %#llx",
                                        l2_entry & L2E_OFFSET_MASK);
            } else {
                qcow2_free_clusters(bs, l2_entry & L2E_OFFSET_MASK,
                                    nb_clusters << s->cluster_bits, type);
            }
        }
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        break;
    default:
        abort();
    }
}



/*********************************************************/
/* snapshots and image creation */



/* update the refcounts of snapshots and the copied flag */
int qcow2_update_snapshot_refcount(BlockDriverState *bs,
    int64_t l1_table_offset, int l1_size, int addend)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table, *l2_table, l2_offset, offset, l1_size2;
    bool l1_allocated = false;
    int64_t old_offset, old_l2_offset;
    int i, j, l1_modified = 0, nb_csectors, refcount;
    int ret;

    l2_table = NULL;
    l1_table = NULL;
    l1_size2 = l1_size * sizeof(uint64_t);

    s->cache_discards = true;

    /* WARNING: qcow2_snapshot_goto relies on this function not using the
     * l1_table_offset when it is the current s->l1_table_offset! Be careful
     * when changing this! */
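    /* For orientation (illustrative summary): addend is +1 when a snapshot
     * gains a reference to these tables, -1 when a snapshot is deleted, and
     * 0 to only recompute the QCOW_OFLAG_COPIED flags from the current
     * refcounts without changing them. */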
    if (l1_table_offset != s->l1_table_offset) {
        l1_table = g_try_malloc0(align_offset(l1_size2, 512));
        if (l1_size2 && l1_table == NULL) {
            ret = -ENOMEM;
            goto fail;
        }
        l1_allocated = true;

        ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
        if (ret < 0) {
            goto fail;
        }

        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
        }
    } else {
        assert(l1_size == s->l1_size);
        l1_table = s->l1_table;
        l1_allocated = false;
    }

    for (i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            old_l2_offset = l2_offset;
            l2_offset &= L1E_OFFSET_MASK;

            if (offset_into_cluster(s, l2_offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                        PRIx64 " unaligned (L1 index: %#x)",
                                        l2_offset, i);
                ret = -EIO;
                goto fail;
            }

            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                (void**) &l2_table);
            if (ret < 0) {
                goto fail;
            }

            for (j = 0; j < s->l2_size; j++) {
                uint64_t cluster_index;

                offset = be64_to_cpu(l2_table[j]);
                old_offset = offset;
                offset &= ~QCOW_OFLAG_COPIED;

                switch (qcow2_get_cluster_type(offset)) {
                case QCOW2_CLUSTER_COMPRESSED:
                    nb_csectors = ((offset >> s->csize_shift) &
                                   s->csize_mask) + 1;
                    if (addend != 0) {
                        ret = update_refcount(bs,
                            (offset & s->cluster_offset_mask) & ~511,
                            nb_csectors * 512, addend,
                            QCOW2_DISCARD_SNAPSHOT);
                        if (ret < 0) {
                            goto fail;
                        }
                    }
                    /* compressed clusters are never modified */
                    refcount = 2;
                    break;

                case QCOW2_CLUSTER_NORMAL:
                case QCOW2_CLUSTER_ZERO:
                    if (offset_into_cluster(s, offset & L2E_OFFSET_MASK)) {
                        qcow2_signal_corruption(bs, true, -1, -1, "Data "
                                                "cluster offset %#llx "
                                                "unaligned (L2 offset: %#"
                                                PRIx64 ", L2 index: %#x)",
                                                offset & L2E_OFFSET_MASK,
                                                l2_offset, j);
                        ret = -EIO;
                        goto fail;
                    }

                    cluster_index = (offset & L2E_OFFSET_MASK) >> s->cluster_bits;
                    if (!cluster_index) {
                        /* unallocated */
                        refcount = 0;
                        break;
                    }
                    if (addend != 0) {
                        refcount = qcow2_update_cluster_refcount(bs,
                            cluster_index, addend,
                            QCOW2_DISCARD_SNAPSHOT);
                    } else {
                        refcount = qcow2_get_refcount(bs, cluster_index);
                    }

                    if (refcount < 0) {
                        ret = refcount;
                        goto fail;
                    }
                    break;

                case QCOW2_CLUSTER_UNALLOCATED:
                    refcount = 0;
                    break;

                default:
                    abort();
                }

                if (refcount == 1) {
                    offset |= QCOW_OFLAG_COPIED;
                }
                if (offset != old_offset) {
                    if (addend > 0) {
                        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                            s->refcount_block_cache);
                    }
                    l2_table[j] = cpu_to_be64(offset);
                    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
                }
            }

            ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
            if (ret < 0) {
                goto fail;
            }


            if (addend != 0) {
                refcount = qcow2_update_cluster_refcount(bs, l2_offset >>
                        s->cluster_bits, addend, QCOW2_DISCARD_SNAPSHOT);
            } else {
                refcount = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits);
            }
            if (refcount < 0) {
                ret = refcount;
                goto fail;
            } else if (refcount == 1) {
                l2_offset |= QCOW_OFLAG_COPIED;
            }
            if (l2_offset != old_l2_offset) {
                l1_table[i] = l2_offset;
                l1_modified = 1;
            }
        }
    }

    ret = bdrv_flush(bs);
fail:
    if (l2_table) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    }

    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    /* Update L1 only if it isn't deleted anyway (addend = -1) */
    if (ret == 0 && addend >= 0 && l1_modified) {
        for (i = 0; i < l1_size; i++) {
            cpu_to_be64s(&l1_table[i]);
        }

        ret = bdrv_pwrite_sync(bs->file, l1_table_offset, l1_table, l1_size2);

        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
        }
    }
    if (l1_allocated) {
        g_free(l1_table);
    }
    return ret;
}




/*********************************************************/
/* refcount checking functions */



/*
 * Increases the refcount for a range of clusters in a given refcount table.
 * This is used to construct a temporary refcount table out of L1 and L2 tables
 * which can be compared to the refcount table saved in the image.
 *
 * Modifies the number of errors in res.
 */
static int inc_refcounts(BlockDriverState *bs,
                         BdrvCheckResult *res,
                         uint16_t **refcount_table,
                         int64_t *refcount_table_size,
                         int64_t offset, int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t start, last, cluster_offset, k;

    if (size <= 0) {
        return 0;
    }

    start = start_of_cluster(s, offset);
    last = start_of_cluster(s, offset + size - 1);
    for (cluster_offset = start; cluster_offset <= last;
         cluster_offset += s->cluster_size) {
        k = cluster_offset >> s->cluster_bits;
        if (k >= *refcount_table_size) {
            int64_t old_refcount_table_size = *refcount_table_size;
            uint16_t *new_refcount_table;

            *refcount_table_size = k + 1;
            new_refcount_table = g_try_realloc(*refcount_table,
                                               *refcount_table_size *
                                               sizeof(**refcount_table));
            if (!new_refcount_table) {
                *refcount_table_size = old_refcount_table_size;
                res->check_errors++;
                return -ENOMEM;
            }
            *refcount_table = new_refcount_table;

            memset(*refcount_table + old_refcount_table_size, 0,
                   (*refcount_table_size - old_refcount_table_size) *
                   sizeof(**refcount_table));
        }

        if (++(*refcount_table)[k] == 0) {
            fprintf(stderr, "ERROR: overflow cluster offset=0x%" PRIx64
                    "\n", cluster_offset);
            res->corruptions++;
        }
    }

    return 0;
}

/* Flags for check_refcounts_l1() and check_refcounts_l2() */
enum {
    CHECK_FRAG_INFO = 0x2,      /* update BlockFragInfo counters */
};
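/*
 * Usage sketch for inc_refcounts() (illustrative): marking the image header
 * as referenced once is simply
 *
 *     ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
 *                         0, s->cluster_size);
 *
 * The table grows on demand, so callers never pre-size it for the offsets
 * they are about to mark.
 */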
/*
 * Increases the refcount in the given refcount table for all clusters
 * referenced in the L2 table. While doing so, performs some checks on L2
 * entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
    uint16_t **refcount_table, int64_t *refcount_table_size, int64_t l2_offset,
    int flags)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table, l2_entry;
    uint64_t next_contiguous_offset = 0;
    int i, l2_size, nb_csectors, ret;

    /* Read L2 table from disk */
    l2_size = s->l2_size * sizeof(uint64_t);
    l2_table = g_malloc(l2_size);

    ret = bdrv_pread(bs->file, l2_offset, l2_table, l2_size);
    if (ret < 0) {
        fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
        res->check_errors++;
        goto fail;
    }

    /* Do the actual checks */
    for (i = 0; i < s->l2_size; i++) {
        l2_entry = be64_to_cpu(l2_table[i]);

        switch (qcow2_get_cluster_type(l2_entry)) {
        case QCOW2_CLUSTER_COMPRESSED:
            /* Compressed clusters don't have QCOW_OFLAG_COPIED */
            if (l2_entry & QCOW_OFLAG_COPIED) {
                fprintf(stderr, "ERROR: cluster %" PRId64 ": "
                        "copied flag must never be set for compressed "
                        "clusters\n", l2_entry >> s->cluster_bits);
                l2_entry &= ~QCOW_OFLAG_COPIED;
                res->corruptions++;
            }

            /* Mark cluster as used */
            nb_csectors = ((l2_entry >> s->csize_shift) &
                           s->csize_mask) + 1;
            l2_entry &= s->cluster_offset_mask;
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                l2_entry & ~511, nb_csectors * 512);
            if (ret < 0) {
                goto fail;
            }

            if (flags & CHECK_FRAG_INFO) {
                res->bfi.allocated_clusters++;
                res->bfi.compressed_clusters++;

                /* Compressed clusters are fragmented by nature. Since they
                 * take up sub-sector space but we only have sector granularity
                 * I/O we need to re-read the same sectors even for adjacent
                 * compressed clusters.
                 */
                res->bfi.fragmented_clusters++;
            }
            break;

        case QCOW2_CLUSTER_ZERO:
            if ((l2_entry & L2E_OFFSET_MASK) == 0) {
                break;
            }
            /* fall through */

        case QCOW2_CLUSTER_NORMAL:
        {
            uint64_t offset = l2_entry & L2E_OFFSET_MASK;

            if (flags & CHECK_FRAG_INFO) {
                res->bfi.allocated_clusters++;
                if (next_contiguous_offset &&
                    offset != next_contiguous_offset) {
                    res->bfi.fragmented_clusters++;
                }
                next_contiguous_offset = offset + s->cluster_size;
            }

            /* Mark cluster as used */
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                offset, s->cluster_size);
            if (ret < 0) {
                goto fail;
            }

            /* Correct offsets are cluster aligned */
            if (offset_into_cluster(s, offset)) {
                fprintf(stderr, "ERROR offset=%" PRIx64 ": Cluster is not "
                        "properly aligned; L2 entry corrupted.\n", offset);
                res->corruptions++;
            }
            break;
        }

        case QCOW2_CLUSTER_UNALLOCATED:
            break;

        default:
            abort();
        }
    }

    g_free(l2_table);
    return 0;

fail:
    g_free(l2_table);
    return ret;
}
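/*
 * Worked example of the compressed-cluster size math used above
 * (illustrative): an L2 entry stores the number of additional 512-byte
 * sectors in the bits selected by csize_shift/csize_mask, so
 *
 *     nb_csectors = ((l2_entry >> s->csize_shift) & s->csize_mask) + 1;
 *
 * A field value of 3 thus yields 4 sectors, i.e. a 2 KiB extent starting at
 * the entry's host offset rounded down to a 512-byte boundary (the ~511).
 */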
/*
 * Increases the refcount for the L1 table, its L2 tables and all referenced
 * clusters in the given refcount table. While doing so, performs some checks
 * on L1 and L2 entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l1(BlockDriverState *bs,
                              BdrvCheckResult *res,
                              uint16_t **refcount_table,
                              int64_t *refcount_table_size,
                              int64_t l1_table_offset, int l1_size,
                              int flags)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table = NULL, l2_offset, l1_size2;
    int i, ret;

    l1_size2 = l1_size * sizeof(uint64_t);

    /* Mark L1 table as used */
    ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                        l1_table_offset, l1_size2);
    if (ret < 0) {
        goto fail;
    }

    /* Read L1 table entries from disk */
    if (l1_size2 > 0) {
        l1_table = g_try_malloc(l1_size2);
        if (l1_table == NULL) {
            ret = -ENOMEM;
            res->check_errors++;
            goto fail;
        }
        ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
        if (ret < 0) {
            fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
            res->check_errors++;
            goto fail;
        }
        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
        }
    }

    /* Do the actual checks */
    for (i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            /* Mark L2 table as used */
            l2_offset &= L1E_OFFSET_MASK;
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                l2_offset, s->cluster_size);
            if (ret < 0) {
                goto fail;
            }

            /* L2 tables are cluster aligned */
            if (offset_into_cluster(s, l2_offset)) {
                fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not "
                        "cluster aligned; L1 entry corrupted\n", l2_offset);
                res->corruptions++;
            }

            /* Process and check L2 entries */
            ret = check_refcounts_l2(bs, res, refcount_table,
                                     refcount_table_size, l2_offset, flags);
            if (ret < 0) {
                goto fail;
            }
        }
    }
    g_free(l1_table);
    return 0;

fail:
    g_free(l1_table);
    return ret;
}
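/*
 * Background for the next check (illustrative summary): QCOW_OFLAG_COPIED
 * in an L1/L2 entry asserts "refcount == 1, safe to write in place". The
 * invariant is that the flag is set if and only if the refcount of the
 * referenced cluster is exactly 1; any other combination is a corruption
 * that check_oflag_copied() can repair by toggling the flag.
 */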
/*
 * Checks the OFLAG_COPIED flag for all L1 and L2 entries.
 *
 * This function does not print an error message nor does it increment
 * check_errors if qcow2_get_refcount fails (this is because such an error will
 * have been already detected and sufficiently signaled by the calling function
 * (qcow2_check_refcounts) by the time this function is called).
 */
static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
                              BdrvCheckMode fix)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table = qemu_blockalign(bs, s->cluster_size);
    int ret;
    int refcount;
    int i, j;

    for (i = 0; i < s->l1_size; i++) {
        uint64_t l1_entry = s->l1_table[i];
        uint64_t l2_offset = l1_entry & L1E_OFFSET_MASK;
        bool l2_dirty = false;

        if (!l2_offset) {
            continue;
        }

        refcount = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits);
        if (refcount < 0) {
            /* don't print message nor increment check_errors */
            continue;
        }
        if ((refcount == 1) != ((l1_entry & QCOW_OFLAG_COPIED) != 0)) {
            fprintf(stderr, "%s OFLAG_COPIED L2 cluster: l1_index=%d "
                    "l1_entry=%" PRIx64 " refcount=%d\n",
                    fix & BDRV_FIX_ERRORS ? "Repairing" :
                                            "ERROR",
                    i, l1_entry, refcount);
            if (fix & BDRV_FIX_ERRORS) {
                s->l1_table[i] = refcount == 1
                               ? l1_entry |  QCOW_OFLAG_COPIED
                               : l1_entry & ~QCOW_OFLAG_COPIED;
                ret = qcow2_write_l1_entry(bs, i);
                if (ret < 0) {
                    res->check_errors++;
                    goto fail;
                }
                res->corruptions_fixed++;
            } else {
                res->corruptions++;
            }
        }

        ret = bdrv_pread(bs->file, l2_offset, l2_table,
                         s->l2_size * sizeof(uint64_t));
        if (ret < 0) {
            fprintf(stderr, "ERROR: Could not read L2 table: %s\n",
                    strerror(-ret));
            res->check_errors++;
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            uint64_t data_offset = l2_entry & L2E_OFFSET_MASK;
            int cluster_type = qcow2_get_cluster_type(l2_entry);

            if ((cluster_type == QCOW2_CLUSTER_NORMAL) ||
                ((cluster_type == QCOW2_CLUSTER_ZERO) && (data_offset != 0))) {
                refcount = qcow2_get_refcount(bs,
                                              data_offset >> s->cluster_bits);
                if (refcount < 0) {
                    /* don't print message nor increment check_errors */
                    continue;
                }
                if ((refcount == 1) != ((l2_entry & QCOW_OFLAG_COPIED) != 0)) {
                    fprintf(stderr, "%s OFLAG_COPIED data cluster: "
                            "l2_entry=%" PRIx64 " refcount=%d\n",
                            fix & BDRV_FIX_ERRORS ? "Repairing" :
                                                    "ERROR",
                            l2_entry, refcount);
                    if (fix & BDRV_FIX_ERRORS) {
                        l2_table[j] = cpu_to_be64(refcount == 1
                                    ? l2_entry |  QCOW_OFLAG_COPIED
                                    : l2_entry & ~QCOW_OFLAG_COPIED);
                        l2_dirty = true;
                        res->corruptions_fixed++;
                    } else {
                        res->corruptions++;
                    }
                }
            }
        }

        if (l2_dirty) {
            ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L2,
                                                l2_offset, s->cluster_size);
            if (ret < 0) {
                fprintf(stderr, "ERROR: Could not write L2 table; metadata "
                        "overlap check failed: %s\n", strerror(-ret));
                res->check_errors++;
                goto fail;
            }

            ret = bdrv_pwrite(bs->file, l2_offset, l2_table, s->cluster_size);
            if (ret < 0) {
                fprintf(stderr, "ERROR: Could not write L2 table: %s\n",
                        strerror(-ret));
                res->check_errors++;
                goto fail;
            }
        }
    }

    ret = 0;

fail:
    qemu_vfree(l2_table);
    return ret;
}
"Repairing" : "ERROR", i); 1479 1480 if (fix & BDRV_FIX_ERRORS) { 1481 int64_t old_nb_clusters = *nb_clusters; 1482 uint16_t *new_refcount_table; 1483 1484 if (offset > INT64_MAX - s->cluster_size) { 1485 ret = -EINVAL; 1486 goto resize_fail; 1487 } 1488 1489 ret = bdrv_truncate(bs->file, offset + s->cluster_size); 1490 if (ret < 0) { 1491 goto resize_fail; 1492 } 1493 size = bdrv_getlength(bs->file); 1494 if (size < 0) { 1495 ret = size; 1496 goto resize_fail; 1497 } 1498 1499 *nb_clusters = size_to_clusters(s, size); 1500 assert(*nb_clusters >= old_nb_clusters); 1501 1502 new_refcount_table = g_try_realloc(*refcount_table, 1503 *nb_clusters * 1504 sizeof(**refcount_table)); 1505 if (!new_refcount_table) { 1506 *nb_clusters = old_nb_clusters; 1507 res->check_errors++; 1508 return -ENOMEM; 1509 } 1510 *refcount_table = new_refcount_table; 1511 1512 memset(*refcount_table + old_nb_clusters, 0, 1513 (*nb_clusters - old_nb_clusters) * 1514 sizeof(**refcount_table)); 1515 1516 if (cluster >= *nb_clusters) { 1517 ret = -EINVAL; 1518 goto resize_fail; 1519 } 1520 1521 res->corruptions_fixed++; 1522 ret = inc_refcounts(bs, res, refcount_table, nb_clusters, 1523 offset, s->cluster_size); 1524 if (ret < 0) { 1525 return ret; 1526 } 1527 /* No need to check whether the refcount is now greater than 1: 1528 * This area was just allocated and zeroed, so it can only be 1529 * exactly 1 after inc_refcounts() */ 1530 continue; 1531 1532 resize_fail: 1533 res->corruptions++; 1534 *rebuild = true; 1535 fprintf(stderr, "ERROR could not resize image: %s\n", 1536 strerror(-ret)); 1537 } else { 1538 res->corruptions++; 1539 } 1540 continue; 1541 } 1542 1543 if (offset != 0) { 1544 ret = inc_refcounts(bs, res, refcount_table, nb_clusters, 1545 offset, s->cluster_size); 1546 if (ret < 0) { 1547 return ret; 1548 } 1549 if ((*refcount_table)[cluster] != 1) { 1550 fprintf(stderr, "ERROR refcount block %" PRId64 1551 " refcount=%d\n", i, (*refcount_table)[cluster]); 1552 res->corruptions++; 1553 *rebuild = true; 1554 } 1555 } 1556 } 1557 1558 return 0; 1559 } 1560 1561 /* 1562 * Calculates an in-memory refcount table. 
/*
 * Calculates an in-memory refcount table.
 */
static int calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                               BdrvCheckMode fix, bool *rebuild,
                               uint16_t **refcount_table, int64_t *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    int64_t i;
    QCowSnapshot *sn;
    int ret;

    if (!*refcount_table) {
        *refcount_table = g_try_new0(uint16_t, *nb_clusters);
        if (*nb_clusters && *refcount_table == NULL) {
            res->check_errors++;
            return -ENOMEM;
        }
    }

    /* header */
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        0, s->cluster_size);
    if (ret < 0) {
        return ret;
    }

    /* current L1 table */
    ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
                             s->l1_table_offset, s->l1_size, CHECK_FRAG_INFO);
    if (ret < 0) {
        return ret;
    }

    /* snapshots */
    for (i = 0; i < s->nb_snapshots; i++) {
        sn = s->snapshots + i;
        ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
                                 sn->l1_table_offset, sn->l1_size, 0);
        if (ret < 0) {
            return ret;
        }
    }
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        s->snapshots_offset, s->snapshots_size);
    if (ret < 0) {
        return ret;
    }

    /* refcount data */
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        s->refcount_table_offset,
                        s->refcount_table_size * sizeof(uint64_t));
    if (ret < 0) {
        return ret;
    }

    return check_refblocks(bs, res, fix, rebuild, refcount_table, nb_clusters);
}

/*
 * Compares the actual reference count for each cluster in the image against
 * the refcount as reported by the refcount structures on-disk.
 */
static void compare_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                              BdrvCheckMode fix, bool *rebuild,
                              int64_t *highest_cluster,
                              uint16_t *refcount_table, int64_t nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    int64_t i;
    int refcount1, refcount2, ret;

    for (i = 0, *highest_cluster = 0; i < nb_clusters; i++) {
        refcount1 = qcow2_get_refcount(bs, i);
        if (refcount1 < 0) {
            fprintf(stderr, "Can't get refcount for cluster %" PRId64 ": %s\n",
                    i, strerror(-refcount1));
            res->check_errors++;
            continue;
        }

        refcount2 = refcount_table[i];

        if (refcount1 > 0 || refcount2 > 0) {
            *highest_cluster = i;
        }

        if (refcount1 != refcount2) {
            /* Check if we're allowed to fix the mismatch */
            int *num_fixed = NULL;
            if (refcount1 == 0) {
                *rebuild = true;
            } else if (refcount1 > refcount2 && (fix & BDRV_FIX_LEAKS)) {
                num_fixed = &res->leaks_fixed;
            } else if (refcount1 < refcount2 && (fix & BDRV_FIX_ERRORS)) {
                num_fixed = &res->corruptions_fixed;
            }

            fprintf(stderr, "%s cluster %" PRId64 " refcount=%d reference=%d\n",
                    num_fixed != NULL     ? "Repairing" :
                    refcount1 < refcount2 ? "ERROR" :
                                            "Leaked",
                    i, refcount1, refcount2);

            if (num_fixed) {
                ret = update_refcount(bs, i << s->cluster_bits, 1,
                                      refcount2 - refcount1,
                                      QCOW2_DISCARD_ALWAYS);
                if (ret >= 0) {
                    (*num_fixed)++;
                    continue;
                }
            }

            /* And if we couldn't, print an error */
            if (refcount1 < refcount2) {
                res->corruptions++;
            } else {
                res->leaks++;
            }
        }
    }
}
/*
 * Allocates clusters using an in-memory refcount table (IMRT) in contrast to
 * the on-disk refcount structures.
 *
 * On input, *first_free_cluster tells where to start looking, and need not
 * actually be a free cluster; the returned offset will not be before that
 * cluster. On output, *first_free_cluster points to the first gap found, even
 * if that gap was too small to be used as the returned offset.
 *
 * Note that *first_free_cluster is a cluster index whereas the return value is
 * an offset.
 */
static int64_t alloc_clusters_imrt(BlockDriverState *bs,
                                   int cluster_count,
                                   uint16_t **refcount_table,
                                   int64_t *imrt_nb_clusters,
                                   int64_t *first_free_cluster)
{
    BDRVQcowState *s = bs->opaque;
    int64_t cluster = *first_free_cluster, i;
    bool first_gap = true;
    int contiguous_free_clusters;

    /* Starting at *first_free_cluster, find a range of at least cluster_count
     * continuously free clusters */
    for (contiguous_free_clusters = 0;
         cluster < *imrt_nb_clusters &&
         contiguous_free_clusters < cluster_count;
         cluster++)
    {
        if (!(*refcount_table)[cluster]) {
            contiguous_free_clusters++;
            if (first_gap) {
                /* If this is the first free cluster found, update
                 * *first_free_cluster accordingly */
                *first_free_cluster = cluster;
                first_gap = false;
            }
        } else if (contiguous_free_clusters) {
            contiguous_free_clusters = 0;
        }
    }

    /* If contiguous_free_clusters is greater than zero, it contains the number
     * of continuously free clusters until the current cluster; the first free
     * cluster in the current "gap" is therefore
     * cluster - contiguous_free_clusters */

    /* If no such range could be found, grow the in-memory refcount table
     * accordingly to append free clusters at the end of the image */
    if (contiguous_free_clusters < cluster_count) {
        int64_t old_imrt_nb_clusters = *imrt_nb_clusters;
        uint16_t *new_refcount_table;

        /* contiguous_free_clusters clusters are already empty at the image end;
         * we need cluster_count clusters; therefore, we have to allocate
         * cluster_count - contiguous_free_clusters new clusters at the end of
         * the image (which is the current value of cluster; note that cluster
         * may exceed old_imrt_nb_clusters if *first_free_cluster pointed beyond
         * the image end) */
        *imrt_nb_clusters = cluster + cluster_count - contiguous_free_clusters;
        new_refcount_table = g_try_realloc(*refcount_table,
                                           *imrt_nb_clusters *
                                           sizeof(**refcount_table));
        if (!new_refcount_table) {
            *imrt_nb_clusters = old_imrt_nb_clusters;
            return -ENOMEM;
        }
        *refcount_table = new_refcount_table;

        memset(*refcount_table + old_imrt_nb_clusters, 0,
               (*imrt_nb_clusters - old_imrt_nb_clusters) *
               sizeof(**refcount_table));
    }

    /* Go back to the first free cluster */
    cluster -= contiguous_free_clusters;
    for (i = 0; i < cluster_count; i++) {
        (*refcount_table)[cluster + i] = 1;
    }

    return cluster << s->cluster_bits;
}
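/*
 * Worked example for alloc_clusters_imrt() (illustrative): with IMRT counts
 * [1, 0, 1, 0, 0, 0] and *first_free_cluster = 0, asking for 2 clusters
 * skips the single-cluster gap at index 1, sets *first_free_cluster = 1,
 * marks clusters 3 and 4 as used and returns 3 << cluster_bits.
 */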
/*
 * Creates a new refcount structure based solely on the in-memory information
 * given through *refcount_table. All necessary allocations will be reflected
 * in that array.
 *
 * On success, the old refcount structure is leaked (it will be covered by the
 * new refcount structure).
 */
static int rebuild_refcount_structure(BlockDriverState *bs,
                                      BdrvCheckResult *res,
                                      uint16_t **refcount_table,
                                      int64_t *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    int64_t first_free_cluster = 0, reftable_offset = -1, cluster = 0;
    int64_t refblock_offset, refblock_start, refblock_index;
    uint32_t reftable_size = 0;
    uint64_t *on_disk_reftable = NULL;
    uint16_t *on_disk_refblock;
    int i, ret = 0;
    struct {
        uint64_t reftable_offset;
        uint32_t reftable_clusters;
    } QEMU_PACKED reftable_offset_and_clusters;

    qcow2_cache_empty(bs, s->refcount_block_cache);

write_refblocks:
    for (; cluster < *nb_clusters; cluster++) {
        if (!(*refcount_table)[cluster]) {
            continue;
        }

        refblock_index = cluster >> s->refcount_block_bits;
        refblock_start = refblock_index << s->refcount_block_bits;

        /* Don't allocate a cluster in a refblock already written to disk */
        if (first_free_cluster < refblock_start) {
            first_free_cluster = refblock_start;
        }
        refblock_offset = alloc_clusters_imrt(bs, 1, refcount_table,
                                              nb_clusters, &first_free_cluster);
        if (refblock_offset < 0) {
            fprintf(stderr, "ERROR allocating refblock: %s\n",
                    strerror(-refblock_offset));
            res->check_errors++;
            ret = refblock_offset;
            goto fail;
        }

        if (reftable_size <= refblock_index) {
            uint32_t old_reftable_size = reftable_size;
            uint64_t *new_on_disk_reftable;

            reftable_size = ROUND_UP((refblock_index + 1) * sizeof(uint64_t),
                                     s->cluster_size) / sizeof(uint64_t);
            new_on_disk_reftable = g_try_realloc(on_disk_reftable,
                                                 reftable_size *
                                                 sizeof(uint64_t));
            if (!new_on_disk_reftable) {
                res->check_errors++;
                ret = -ENOMEM;
                goto fail;
            }
            on_disk_reftable = new_on_disk_reftable;

            memset(on_disk_reftable + old_reftable_size, 0,
                   (reftable_size - old_reftable_size) * sizeof(uint64_t));

            /* The offset we have for the reftable is now no longer valid;
             * this will leak that range, but we can easily fix that by running
             * a leak-fixing check after this rebuild operation */
            reftable_offset = -1;
        }
        on_disk_reftable[refblock_index] = refblock_offset;

        /* If this is apparently the last refblock (for now), try to squeeze the
         * reftable in */
        if (refblock_index == (*nb_clusters - 1) >> s->refcount_block_bits &&
            reftable_offset < 0)
        {
            uint64_t reftable_clusters = size_to_clusters(s, reftable_size *
                                                          sizeof(uint64_t));
            reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
                                                  refcount_table, nb_clusters,
                                                  &first_free_cluster);
            if (reftable_offset < 0) {
                fprintf(stderr, "ERROR allocating reftable: %s\n",
                        strerror(-reftable_offset));
                res->check_errors++;
                ret = reftable_offset;
                goto fail;
            }
        }

        ret = qcow2_pre_write_overlap_check(bs, 0, refblock_offset,
                                            s->cluster_size);
        if (ret < 0) {
            fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
            goto fail;
        }

        on_disk_refblock = qemu_blockalign0(bs->file, s->cluster_size);
        for (i = 0; i < s->refcount_block_size &&
                    refblock_start + i < *nb_clusters; i++)
        {
            on_disk_refblock[i] =
                cpu_to_be16((*refcount_table)[refblock_start + i]);
        }

        ret = bdrv_write(bs->file, refblock_offset / BDRV_SECTOR_SIZE,
                         (void *)on_disk_refblock, s->cluster_sectors);
        qemu_vfree(on_disk_refblock);
        if (ret < 0) {
            fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
            goto fail;
        }

        /* Go to the end of this refblock */
        cluster = refblock_start + s->refcount_block_size - 1;
    }

    if (reftable_offset < 0) {
        uint64_t post_refblock_start, reftable_clusters;

        post_refblock_start = ROUND_UP(*nb_clusters, s->refcount_block_size);
        reftable_clusters = size_to_clusters(s,
                                             reftable_size * sizeof(uint64_t));
        /* Not pretty but simple */
        if (first_free_cluster < post_refblock_start) {
            first_free_cluster = post_refblock_start;
        }
        reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
                                              refcount_table, nb_clusters,
                                              &first_free_cluster);
        if (reftable_offset < 0) {
            fprintf(stderr, "ERROR allocating reftable: %s\n",
                    strerror(-reftable_offset));
            res->check_errors++;
            ret = reftable_offset;
            goto fail;
        }

        goto write_refblocks;
    }

    assert(on_disk_reftable);

    for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) {
        cpu_to_be64s(&on_disk_reftable[refblock_index]);
    }

    ret = qcow2_pre_write_overlap_check(bs, 0, reftable_offset,
                                        reftable_size * sizeof(uint64_t));
    if (ret < 0) {
        fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
        goto fail;
    }

    assert(reftable_size < INT_MAX / sizeof(uint64_t));
    ret = bdrv_pwrite(bs->file, reftable_offset, on_disk_reftable,
                      reftable_size * sizeof(uint64_t));
    if (ret < 0) {
        fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
        goto fail;
    }

    /* Enter new reftable into the image header */
    cpu_to_be64w(&reftable_offset_and_clusters.reftable_offset,
                 reftable_offset);
    cpu_to_be32w(&reftable_offset_and_clusters.reftable_clusters,
                 size_to_clusters(s, reftable_size * sizeof(uint64_t)));
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader,
                                              refcount_table_offset),
                           &reftable_offset_and_clusters,
                           sizeof(reftable_offset_and_clusters));
    if (ret < 0) {
        fprintf(stderr, "ERROR setting reftable: %s\n", strerror(-ret));
        goto fail;
    }

    for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) {
        be64_to_cpus(&on_disk_reftable[refblock_index]);
    }
    s->refcount_table = on_disk_reftable;
    s->refcount_table_offset = reftable_offset;
    s->refcount_table_size = reftable_size;

    return 0;

fail:
    g_free(on_disk_reftable);
    return ret;
}

/*
 * Checks an image for refcount consistency.
 *
 * Returns 0 if no errors are found, the number of errors in case the image is
 * detected as corrupted, and -errno when an internal error occurred.
 */
int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                          BdrvCheckMode fix)
{
    BDRVQcowState *s = bs->opaque;
    BdrvCheckResult pre_compare_res;
    int64_t size, highest_cluster, nb_clusters;
    uint16_t *refcount_table = NULL;
    bool rebuild = false;
    int ret;

    size = bdrv_getlength(bs->file);
    if (size < 0) {
        res->check_errors++;
        return size;
    }

    nb_clusters = size_to_clusters(s, size);
    if (nb_clusters > INT_MAX) {
        res->check_errors++;
        return -EFBIG;
    }

    res->bfi.total_clusters =
        size_to_clusters(s, bs->total_sectors * BDRV_SECTOR_SIZE);

    ret = calculate_refcounts(bs, res, fix, &rebuild, &refcount_table,
                              &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* In case we don't need to rebuild the refcount structure (but want to fix
     * something), this function is immediately called again, in which case the
     * result should be ignored */
    pre_compare_res = *res;
    compare_refcounts(bs, res, 0, &rebuild, &highest_cluster, refcount_table,
                      nb_clusters);

    if (rebuild && (fix & BDRV_FIX_ERRORS)) {
        BdrvCheckResult old_res = *res;
        int fresh_leaks = 0;

        fprintf(stderr, "Rebuilding refcount structure\n");
        ret = rebuild_refcount_structure(bs, res, &refcount_table,
                                         &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        res->corruptions = 0;
        res->leaks = 0;

        /* Because the old reftable has been exchanged for a new one the
         * references have to be recalculated */
        rebuild = false;
        memset(refcount_table, 0, nb_clusters * sizeof(uint16_t));
        ret = calculate_refcounts(bs, res, 0, &rebuild, &refcount_table,
                                  &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        if (fix & BDRV_FIX_LEAKS) {
            /* The old refcount structures are now leaked, fix it; the result
             * can be ignored, aside from leaks which were introduced by
             * rebuild_refcount_structure() that could not be fixed */
            BdrvCheckResult saved_res = *res;
            *res = (BdrvCheckResult){ 0 };

            compare_refcounts(bs, res, BDRV_FIX_LEAKS, &rebuild,
                              &highest_cluster, refcount_table, nb_clusters);
            if (rebuild) {
                fprintf(stderr, "ERROR rebuilt refcount structure is still "
                        "broken\n");
            }

            /* Any leaks accounted for here were introduced by
             * rebuild_refcount_structure() because that function has created a
             * new refcount structure from scratch */
            fresh_leaks = res->leaks;
            *res = saved_res;
        }

        if (res->corruptions < old_res.corruptions) {
            res->corruptions_fixed += old_res.corruptions - res->corruptions;
        }
        if (res->leaks < old_res.leaks) {
            res->leaks_fixed += old_res.leaks - res->leaks;
        }
        res->leaks += fresh_leaks;
    } else if (fix) {
        if (rebuild) {
            fprintf(stderr, "ERROR need to rebuild refcount structures\n");
            res->check_errors++;
            ret = -EIO;
            goto fail;
        }

        if (res->leaks || res->corruptions) {
            *res = pre_compare_res;
            compare_refcounts(bs, res, fix, &rebuild, &highest_cluster,
                              refcount_table, nb_clusters);
        }
    }

    /* check OFLAG_COPIED */
    ret = check_oflag_copied(bs, res, fix);
    if (ret < 0) {
        goto fail;
    }

    res->image_end_offset = (highest_cluster + 1) * s->cluster_size;
    ret = 0;

fail:
    g_free(refcount_table);

    return ret;
}

#define overlaps_with(ofs, sz) \
    ranges_overlap(offset, size, ofs, sz)
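
/* Note that overlaps_with() deliberately expands to a use of the local
 * variables "offset" and "size" of qcow2_check_metadata_overlap() below, so
 * it is only meaningful inside that function. */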

/*
 * Checks if the given offset into the image file is actually free to use by
 * looking for overlaps with important metadata sections (L1/L2 tables etc.),
 * i.e. a sanity check without relying on the refcount tables.
 *
 * The ign parameter specifies what checks not to perform (being a bitmask of
 * QCow2MetadataOverlap values), i.e., what sections to ignore.
 *
 * Returns:
 * - 0 if writing to this offset will not affect the mentioned metadata
 * - a positive QCow2MetadataOverlap value indicating one overlapping section
 * - a negative value (-errno) indicating an error while performing a check,
 *   e.g. when bdrv_pread failed on QCOW2_OL_INACTIVE_L2
 */
int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
                                 int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    int chk = s->overlap_check & ~ign;
    int i, j;

    if (!size) {
        return 0;
    }

    if (chk & QCOW2_OL_MAIN_HEADER) {
        if (offset < s->cluster_size) {
            return QCOW2_OL_MAIN_HEADER;
        }
    }

    /* align range to test to cluster boundaries */
    size = align_offset(offset_into_cluster(s, offset) + size, s->cluster_size);
    offset = start_of_cluster(s, offset);

    if ((chk & QCOW2_OL_ACTIVE_L1) && s->l1_size) {
        if (overlaps_with(s->l1_table_offset, s->l1_size * sizeof(uint64_t))) {
            return QCOW2_OL_ACTIVE_L1;
        }
    }

    if ((chk & QCOW2_OL_REFCOUNT_TABLE) && s->refcount_table_size) {
        if (overlaps_with(s->refcount_table_offset,
                          s->refcount_table_size * sizeof(uint64_t))) {
            return QCOW2_OL_REFCOUNT_TABLE;
        }
    }

    if ((chk & QCOW2_OL_SNAPSHOT_TABLE) && s->snapshots_size) {
        if (overlaps_with(s->snapshots_offset, s->snapshots_size)) {
            return QCOW2_OL_SNAPSHOT_TABLE;
        }
    }

    if ((chk & QCOW2_OL_INACTIVE_L1) && s->snapshots) {
        for (i = 0; i < s->nb_snapshots; i++) {
            if (s->snapshots[i].l1_size &&
                overlaps_with(s->snapshots[i].l1_table_offset,
                              s->snapshots[i].l1_size * sizeof(uint64_t))) {
                return QCOW2_OL_INACTIVE_L1;
            }
        }
    }

    if ((chk & QCOW2_OL_ACTIVE_L2) && s->l1_table) {
        for (i = 0; i < s->l1_size; i++) {
            if ((s->l1_table[i] & L1E_OFFSET_MASK) &&
                overlaps_with(s->l1_table[i] & L1E_OFFSET_MASK,
                              s->cluster_size)) {
                return QCOW2_OL_ACTIVE_L2;
            }
        }
    }

    if ((chk & QCOW2_OL_REFCOUNT_BLOCK) && s->refcount_table) {
        for (i = 0; i < s->refcount_table_size; i++) {
            if ((s->refcount_table[i] & REFT_OFFSET_MASK) &&
                overlaps_with(s->refcount_table[i] & REFT_OFFSET_MASK,
                              s->cluster_size)) {
                return QCOW2_OL_REFCOUNT_BLOCK;
            }
        }
    }

    if ((chk & QCOW2_OL_INACTIVE_L2) && s->snapshots) {
        for (i = 0; i < s->nb_snapshots; i++) {
            uint64_t l1_ofs = s->snapshots[i].l1_table_offset;
            uint32_t l1_sz  = s->snapshots[i].l1_size;
            uint64_t l1_sz2 = l1_sz * sizeof(uint64_t);
            uint64_t *l1 = g_try_malloc(l1_sz2);
            int ret;

            if (l1_sz2 && l1 == NULL) {
                return -ENOMEM;
            }

            ret = bdrv_pread(bs->file, l1_ofs, l1, l1_sz2);
            if (ret < 0) {
                g_free(l1);
                return ret;
            }

            for (j = 0; j < l1_sz; j++) {
                uint64_t l2_ofs = be64_to_cpu(l1[j]) & L1E_OFFSET_MASK;
                if (l2_ofs && overlaps_with(l2_ofs, s->cluster_size)) {
                    g_free(l1);
                    return QCOW2_OL_INACTIVE_L2;
                }
            }

            g_free(l1);
        }
    }

    return 0;
}
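
/*
 * Illustrative sketch (hypothetical snippet, not a caller in this file):
 * before overwriting a region at @off, a caller could verify that the write
 * cannot clobber metadata, here ignoring inactive L2 tables to avoid the
 * extra reads of snapshot L1 tables:
 *
 *     int ret = qcow2_check_metadata_overlap(bs, QCOW2_OL_INACTIVE_L2,
 *                                            off, s->cluster_size);
 *     if (ret > 0) {
 *         // ret is a single QCOW2_OL_* bit naming the overlapped section
 *     } else if (ret < 0) {
 *         // error during the check itself, e.g. -ENOMEM
 *     }
 */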

static const char *metadata_ol_names[] = {
    [QCOW2_OL_MAIN_HEADER_BITNR]    = "qcow2_header",
    [QCOW2_OL_ACTIVE_L1_BITNR]      = "active L1 table",
    [QCOW2_OL_ACTIVE_L2_BITNR]      = "active L2 table",
    [QCOW2_OL_REFCOUNT_TABLE_BITNR] = "refcount table",
    [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = "refcount block",
    [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = "snapshot table",
    [QCOW2_OL_INACTIVE_L1_BITNR]    = "inactive L1 table",
    [QCOW2_OL_INACTIVE_L2_BITNR]    = "inactive L2 table",
};

/*
 * First performs a check for metadata overlaps (through
 * qcow2_check_metadata_overlap); if that fails with a negative value (error
 * while performing a check), that value is returned. If an impending overlap
 * is detected, the BDS will be made unusable, the qcow2 file marked corrupt
 * and -EIO returned.
 *
 * Returns 0 if there were neither overlaps nor errors while checking for
 * overlaps; or a negative value (-errno) on error.
 */
int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
                                  int64_t size)
{
    int ret = qcow2_check_metadata_overlap(bs, ign, offset, size);

    if (ret < 0) {
        return ret;
    } else if (ret > 0) {
        int metadata_ol_bitnr = ffs(ret) - 1;
        assert(metadata_ol_bitnr < QCOW2_OL_MAX_BITNR);

        qcow2_signal_corruption(bs, true, offset, size, "Preventing invalid "
                                "write on metadata (overlaps with %s)",
                                metadata_ol_names[metadata_ol_bitnr]);
        return -EIO;
    }

    return 0;
}
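
/*
 * Illustrative sketch (hypothetical snippet; actual callers live in other
 * qcow2 source files): metadata writers are expected to run this check
 * immediately before the write they are about to perform, e.g.:
 *
 *     ret = qcow2_pre_write_overlap_check(bs, 0, l2_offset, s->cluster_size);
 *     if (ret < 0) {
 *         return ret;   // on overlap, the image is already marked corrupt
 *     }
 *     ret = bdrv_pwrite(bs->file, l2_offset, l2_table, s->cluster_size);
 */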