1 /* 2 * Block driver for the QCOW version 2 format 3 * 4 * Copyright (c) 2004-2006 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25 #include "qemu-common.h" 26 #include "block/block_int.h" 27 #include "block/qcow2.h" 28 #include "qemu/range.h" 29 #include "qapi/qmp/types.h" 30 31 static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size); 32 static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs, 33 int64_t offset, int64_t length, 34 int addend, enum qcow2_discard_type type); 35 36 37 /*********************************************************/ 38 /* refcount handling */ 39 40 int qcow2_refcount_init(BlockDriverState *bs) 41 { 42 BDRVQcowState *s = bs->opaque; 43 int ret, refcount_table_size2, i; 44 45 refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t); 46 s->refcount_table = g_malloc(refcount_table_size2); 47 if (s->refcount_table_size > 0) { 48 BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_LOAD); 49 ret = bdrv_pread(bs->file, s->refcount_table_offset, 50 s->refcount_table, refcount_table_size2); 51 if (ret != refcount_table_size2) 52 goto fail; 53 for(i = 0; i < s->refcount_table_size; i++) 54 be64_to_cpus(&s->refcount_table[i]); 55 } 56 return 0; 57 fail: 58 return -ENOMEM; 59 } 60 61 void qcow2_refcount_close(BlockDriverState *bs) 62 { 63 BDRVQcowState *s = bs->opaque; 64 g_free(s->refcount_table); 65 } 66 67 68 static int load_refcount_block(BlockDriverState *bs, 69 int64_t refcount_block_offset, 70 void **refcount_block) 71 { 72 BDRVQcowState *s = bs->opaque; 73 int ret; 74 75 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_LOAD); 76 ret = qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset, 77 refcount_block); 78 79 return ret; 80 } 81 82 /* 83 * Returns the refcount of the cluster given by its index. Any non-negative 84 * return value is the refcount of the cluster, negative values are -errno 85 * and indicate an error. 86 */ 87 static int get_refcount(BlockDriverState *bs, int64_t cluster_index) 88 { 89 BDRVQcowState *s = bs->opaque; 90 int refcount_table_index, block_index; 91 int64_t refcount_block_offset; 92 int ret; 93 uint16_t *refcount_block; 94 uint16_t refcount; 95 96 refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT); 97 if (refcount_table_index >= s->refcount_table_size) 98 return 0; 99 refcount_block_offset = 100 s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK; 101 if (!refcount_block_offset) 102 return 0; 103 104 ret = qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset, 105 (void**) &refcount_block); 106 if (ret < 0) { 107 return ret; 108 } 109 110 block_index = cluster_index & 111 ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1); 112 refcount = be16_to_cpu(refcount_block[block_index]); 113 114 ret = qcow2_cache_put(bs, s->refcount_block_cache, 115 (void**) &refcount_block); 116 if (ret < 0) { 117 return ret; 118 } 119 120 return refcount; 121 } 122 123 /* 124 * Rounds the refcount table size up to avoid growing the table for each single 125 * refcount block that is allocated. 126 */ 127 static unsigned int next_refcount_table_size(BDRVQcowState *s, 128 unsigned int min_size) 129 { 130 unsigned int min_clusters = (min_size >> (s->cluster_bits - 3)) + 1; 131 unsigned int refcount_table_clusters = 132 MAX(1, s->refcount_table_size >> (s->cluster_bits - 3)); 133 134 while (min_clusters > refcount_table_clusters) { 135 refcount_table_clusters = (refcount_table_clusters * 3 + 1) / 2; 136 } 137 138 return refcount_table_clusters << (s->cluster_bits - 3); 139 } 140 141 142 /* Checks if two offsets are described by the same refcount block */ 143 static int in_same_refcount_block(BDRVQcowState *s, uint64_t offset_a, 144 uint64_t offset_b) 145 { 146 uint64_t block_a = offset_a >> (2 * s->cluster_bits - REFCOUNT_SHIFT); 147 uint64_t block_b = offset_b >> (2 * s->cluster_bits - REFCOUNT_SHIFT); 148 149 return (block_a == block_b); 150 } 151 152 /* 153 * Loads a refcount block. If it doesn't exist yet, it is allocated first 154 * (including growing the refcount table if needed). 155 * 156 * Returns 0 on success or -errno in error case 157 */ 158 static int alloc_refcount_block(BlockDriverState *bs, 159 int64_t cluster_index, uint16_t **refcount_block) 160 { 161 BDRVQcowState *s = bs->opaque; 162 unsigned int refcount_table_index; 163 int ret; 164 165 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC); 166 167 /* Find the refcount block for the given cluster */ 168 refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT); 169 170 if (refcount_table_index < s->refcount_table_size) { 171 172 uint64_t refcount_block_offset = 173 s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK; 174 175 /* If it's already there, we're done */ 176 if (refcount_block_offset) { 177 return load_refcount_block(bs, refcount_block_offset, 178 (void**) refcount_block); 179 } 180 } 181 182 /* 183 * If we came here, we need to allocate something. Something is at least 184 * a cluster for the new refcount block. It may also include a new refcount 185 * table if the old refcount table is too small. 186 * 187 * Note that allocating clusters here needs some special care: 188 * 189 * - We can't use the normal qcow2_alloc_clusters(), it would try to 190 * increase the refcount and very likely we would end up with an endless 191 * recursion. Instead we must place the refcount blocks in a way that 192 * they can describe them themselves. 193 * 194 * - We need to consider that at this point we are inside update_refcounts 195 * and doing the initial refcount increase. This means that some clusters 196 * have already been allocated by the caller, but their refcount isn't 197 * accurate yet. free_cluster_index tells us where this allocation ends 198 * as long as we don't overwrite it by freeing clusters. 199 * 200 * - alloc_clusters_noref and qcow2_free_clusters may load a different 201 * refcount block into the cache 202 */ 203 204 *refcount_block = NULL; 205 206 /* We write to the refcount table, so we might depend on L2 tables */ 207 ret = qcow2_cache_flush(bs, s->l2_table_cache); 208 if (ret < 0) { 209 return ret; 210 } 211 212 /* Allocate the refcount block itself and mark it as used */ 213 int64_t new_block = alloc_clusters_noref(bs, s->cluster_size); 214 if (new_block < 0) { 215 return new_block; 216 } 217 218 #ifdef DEBUG_ALLOC2 219 fprintf(stderr, "qcow2: Allocate refcount block %d for %" PRIx64 220 " at %" PRIx64 "\n", 221 refcount_table_index, cluster_index << s->cluster_bits, new_block); 222 #endif 223 224 if (in_same_refcount_block(s, new_block, cluster_index << s->cluster_bits)) { 225 /* Zero the new refcount block before updating it */ 226 ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block, 227 (void**) refcount_block); 228 if (ret < 0) { 229 goto fail_block; 230 } 231 232 memset(*refcount_block, 0, s->cluster_size); 233 234 /* The block describes itself, need to update the cache */ 235 int block_index = (new_block >> s->cluster_bits) & 236 ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1); 237 (*refcount_block)[block_index] = cpu_to_be16(1); 238 } else { 239 /* Described somewhere else. This can recurse at most twice before we 240 * arrive at a block that describes itself. */ 241 ret = update_refcount(bs, new_block, s->cluster_size, 1, 242 QCOW2_DISCARD_NEVER); 243 if (ret < 0) { 244 goto fail_block; 245 } 246 247 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 248 if (ret < 0) { 249 goto fail_block; 250 } 251 252 /* Initialize the new refcount block only after updating its refcount, 253 * update_refcount uses the refcount cache itself */ 254 ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block, 255 (void**) refcount_block); 256 if (ret < 0) { 257 goto fail_block; 258 } 259 260 memset(*refcount_block, 0, s->cluster_size); 261 } 262 263 /* Now the new refcount block needs to be written to disk */ 264 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE); 265 qcow2_cache_entry_mark_dirty(s->refcount_block_cache, *refcount_block); 266 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 267 if (ret < 0) { 268 goto fail_block; 269 } 270 271 /* If the refcount table is big enough, just hook the block up there */ 272 if (refcount_table_index < s->refcount_table_size) { 273 uint64_t data64 = cpu_to_be64(new_block); 274 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP); 275 ret = bdrv_pwrite_sync(bs->file, 276 s->refcount_table_offset + refcount_table_index * sizeof(uint64_t), 277 &data64, sizeof(data64)); 278 if (ret < 0) { 279 goto fail_block; 280 } 281 282 s->refcount_table[refcount_table_index] = new_block; 283 return 0; 284 } 285 286 ret = qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block); 287 if (ret < 0) { 288 goto fail_block; 289 } 290 291 /* 292 * If we come here, we need to grow the refcount table. Again, a new 293 * refcount table needs some space and we can't simply allocate to avoid 294 * endless recursion. 295 * 296 * Therefore let's grab new refcount blocks at the end of the image, which 297 * will describe themselves and the new refcount table. This way we can 298 * reference them only in the new table and do the switch to the new 299 * refcount table at once without producing an inconsistent state in 300 * between. 301 */ 302 BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_GROW); 303 304 /* Calculate the number of refcount blocks needed so far */ 305 uint64_t refcount_block_clusters = 1 << (s->cluster_bits - REFCOUNT_SHIFT); 306 uint64_t blocks_used = (s->free_cluster_index + 307 refcount_block_clusters - 1) / refcount_block_clusters; 308 309 /* And now we need at least one block more for the new metadata */ 310 uint64_t table_size = next_refcount_table_size(s, blocks_used + 1); 311 uint64_t last_table_size; 312 uint64_t blocks_clusters; 313 do { 314 uint64_t table_clusters = 315 size_to_clusters(s, table_size * sizeof(uint64_t)); 316 blocks_clusters = 1 + 317 ((table_clusters + refcount_block_clusters - 1) 318 / refcount_block_clusters); 319 uint64_t meta_clusters = table_clusters + blocks_clusters; 320 321 last_table_size = table_size; 322 table_size = next_refcount_table_size(s, blocks_used + 323 ((meta_clusters + refcount_block_clusters - 1) 324 / refcount_block_clusters)); 325 326 } while (last_table_size != table_size); 327 328 #ifdef DEBUG_ALLOC2 329 fprintf(stderr, "qcow2: Grow refcount table %" PRId32 " => %" PRId64 "\n", 330 s->refcount_table_size, table_size); 331 #endif 332 333 /* Create the new refcount table and blocks */ 334 uint64_t meta_offset = (blocks_used * refcount_block_clusters) * 335 s->cluster_size; 336 uint64_t table_offset = meta_offset + blocks_clusters * s->cluster_size; 337 uint16_t *new_blocks = g_malloc0(blocks_clusters * s->cluster_size); 338 uint64_t *new_table = g_malloc0(table_size * sizeof(uint64_t)); 339 340 assert(meta_offset >= (s->free_cluster_index * s->cluster_size)); 341 342 /* Fill the new refcount table */ 343 memcpy(new_table, s->refcount_table, 344 s->refcount_table_size * sizeof(uint64_t)); 345 new_table[refcount_table_index] = new_block; 346 347 int i; 348 for (i = 0; i < blocks_clusters; i++) { 349 new_table[blocks_used + i] = meta_offset + (i * s->cluster_size); 350 } 351 352 /* Fill the refcount blocks */ 353 uint64_t table_clusters = size_to_clusters(s, table_size * sizeof(uint64_t)); 354 int block = 0; 355 for (i = 0; i < table_clusters + blocks_clusters; i++) { 356 new_blocks[block++] = cpu_to_be16(1); 357 } 358 359 /* Write refcount blocks to disk */ 360 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS); 361 ret = bdrv_pwrite_sync(bs->file, meta_offset, new_blocks, 362 blocks_clusters * s->cluster_size); 363 g_free(new_blocks); 364 if (ret < 0) { 365 goto fail_table; 366 } 367 368 /* Write refcount table to disk */ 369 for(i = 0; i < table_size; i++) { 370 cpu_to_be64s(&new_table[i]); 371 } 372 373 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE); 374 ret = bdrv_pwrite_sync(bs->file, table_offset, new_table, 375 table_size * sizeof(uint64_t)); 376 if (ret < 0) { 377 goto fail_table; 378 } 379 380 for(i = 0; i < table_size; i++) { 381 be64_to_cpus(&new_table[i]); 382 } 383 384 /* Hook up the new refcount table in the qcow2 header */ 385 uint8_t data[12]; 386 cpu_to_be64w((uint64_t*)data, table_offset); 387 cpu_to_be32w((uint32_t*)(data + 8), table_clusters); 388 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE); 389 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, refcount_table_offset), 390 data, sizeof(data)); 391 if (ret < 0) { 392 goto fail_table; 393 } 394 395 /* And switch it in memory */ 396 uint64_t old_table_offset = s->refcount_table_offset; 397 uint64_t old_table_size = s->refcount_table_size; 398 399 g_free(s->refcount_table); 400 s->refcount_table = new_table; 401 s->refcount_table_size = table_size; 402 s->refcount_table_offset = table_offset; 403 404 /* Free old table. Remember, we must not change free_cluster_index */ 405 uint64_t old_free_cluster_index = s->free_cluster_index; 406 qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t), 407 QCOW2_DISCARD_OTHER); 408 s->free_cluster_index = old_free_cluster_index; 409 410 ret = load_refcount_block(bs, new_block, (void**) refcount_block); 411 if (ret < 0) { 412 return ret; 413 } 414 415 return 0; 416 417 fail_table: 418 g_free(new_table); 419 fail_block: 420 if (*refcount_block != NULL) { 421 qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block); 422 } 423 return ret; 424 } 425 426 void qcow2_process_discards(BlockDriverState *bs, int ret) 427 { 428 BDRVQcowState *s = bs->opaque; 429 Qcow2DiscardRegion *d, *next; 430 431 QTAILQ_FOREACH_SAFE(d, &s->discards, next, next) { 432 QTAILQ_REMOVE(&s->discards, d, next); 433 434 /* Discard is optional, ignore the return value */ 435 if (ret >= 0) { 436 bdrv_discard(bs->file, 437 d->offset >> BDRV_SECTOR_BITS, 438 d->bytes >> BDRV_SECTOR_BITS); 439 } 440 441 g_free(d); 442 } 443 } 444 445 static void update_refcount_discard(BlockDriverState *bs, 446 uint64_t offset, uint64_t length) 447 { 448 BDRVQcowState *s = bs->opaque; 449 Qcow2DiscardRegion *d, *p, *next; 450 451 QTAILQ_FOREACH(d, &s->discards, next) { 452 uint64_t new_start = MIN(offset, d->offset); 453 uint64_t new_end = MAX(offset + length, d->offset + d->bytes); 454 455 if (new_end - new_start <= length + d->bytes) { 456 /* There can't be any overlap, areas ending up here have no 457 * references any more and therefore shouldn't get freed another 458 * time. */ 459 assert(d->bytes + length == new_end - new_start); 460 d->offset = new_start; 461 d->bytes = new_end - new_start; 462 goto found; 463 } 464 } 465 466 d = g_malloc(sizeof(*d)); 467 *d = (Qcow2DiscardRegion) { 468 .bs = bs, 469 .offset = offset, 470 .bytes = length, 471 }; 472 QTAILQ_INSERT_TAIL(&s->discards, d, next); 473 474 found: 475 /* Merge discard requests if they are adjacent now */ 476 QTAILQ_FOREACH_SAFE(p, &s->discards, next, next) { 477 if (p == d 478 || p->offset > d->offset + d->bytes 479 || d->offset > p->offset + p->bytes) 480 { 481 continue; 482 } 483 484 /* Still no overlap possible */ 485 assert(p->offset == d->offset + d->bytes 486 || d->offset == p->offset + p->bytes); 487 488 QTAILQ_REMOVE(&s->discards, p, next); 489 d->offset = MIN(d->offset, p->offset); 490 d->bytes += p->bytes; 491 } 492 } 493 494 /* XXX: cache several refcount block clusters ? */ 495 static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs, 496 int64_t offset, int64_t length, int addend, enum qcow2_discard_type type) 497 { 498 BDRVQcowState *s = bs->opaque; 499 int64_t start, last, cluster_offset; 500 uint16_t *refcount_block = NULL; 501 int64_t old_table_index = -1; 502 int ret; 503 504 #ifdef DEBUG_ALLOC2 505 fprintf(stderr, "update_refcount: offset=%" PRId64 " size=%" PRId64 " addend=%d\n", 506 offset, length, addend); 507 #endif 508 if (length < 0) { 509 return -EINVAL; 510 } else if (length == 0) { 511 return 0; 512 } 513 514 if (addend < 0) { 515 qcow2_cache_set_dependency(bs, s->refcount_block_cache, 516 s->l2_table_cache); 517 } 518 519 start = start_of_cluster(s, offset); 520 last = start_of_cluster(s, offset + length - 1); 521 for(cluster_offset = start; cluster_offset <= last; 522 cluster_offset += s->cluster_size) 523 { 524 int block_index, refcount; 525 int64_t cluster_index = cluster_offset >> s->cluster_bits; 526 int64_t table_index = 527 cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT); 528 529 /* Load the refcount block and allocate it if needed */ 530 if (table_index != old_table_index) { 531 if (refcount_block) { 532 ret = qcow2_cache_put(bs, s->refcount_block_cache, 533 (void**) &refcount_block); 534 if (ret < 0) { 535 goto fail; 536 } 537 } 538 539 ret = alloc_refcount_block(bs, cluster_index, &refcount_block); 540 if (ret < 0) { 541 goto fail; 542 } 543 } 544 old_table_index = table_index; 545 546 qcow2_cache_entry_mark_dirty(s->refcount_block_cache, refcount_block); 547 548 /* we can update the count and save it */ 549 block_index = cluster_index & 550 ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1); 551 552 refcount = be16_to_cpu(refcount_block[block_index]); 553 refcount += addend; 554 if (refcount < 0 || refcount > 0xffff) { 555 ret = -EINVAL; 556 goto fail; 557 } 558 if (refcount == 0 && cluster_index < s->free_cluster_index) { 559 s->free_cluster_index = cluster_index; 560 } 561 refcount_block[block_index] = cpu_to_be16(refcount); 562 563 if (refcount == 0 && s->discard_passthrough[type]) { 564 update_refcount_discard(bs, cluster_offset, s->cluster_size); 565 } 566 } 567 568 ret = 0; 569 fail: 570 if (!s->cache_discards) { 571 qcow2_process_discards(bs, ret); 572 } 573 574 /* Write last changed block to disk */ 575 if (refcount_block) { 576 int wret; 577 wret = qcow2_cache_put(bs, s->refcount_block_cache, 578 (void**) &refcount_block); 579 if (wret < 0) { 580 return ret < 0 ? ret : wret; 581 } 582 } 583 584 /* 585 * Try do undo any updates if an error is returned (This may succeed in 586 * some cases like ENOSPC for allocating a new refcount block) 587 */ 588 if (ret < 0) { 589 int dummy; 590 dummy = update_refcount(bs, offset, cluster_offset - offset, -addend, 591 QCOW2_DISCARD_NEVER); 592 (void)dummy; 593 } 594 595 return ret; 596 } 597 598 /* 599 * Increases or decreases the refcount of a given cluster by one. 600 * addend must be 1 or -1. 601 * 602 * If the return value is non-negative, it is the new refcount of the cluster. 603 * If it is negative, it is -errno and indicates an error. 604 */ 605 int qcow2_update_cluster_refcount(BlockDriverState *bs, 606 int64_t cluster_index, 607 int addend, 608 enum qcow2_discard_type type) 609 { 610 BDRVQcowState *s = bs->opaque; 611 int ret; 612 613 ret = update_refcount(bs, cluster_index << s->cluster_bits, 1, addend, 614 type); 615 if (ret < 0) { 616 return ret; 617 } 618 619 return get_refcount(bs, cluster_index); 620 } 621 622 623 624 /*********************************************************/ 625 /* cluster allocation functions */ 626 627 628 629 /* return < 0 if error */ 630 static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size) 631 { 632 BDRVQcowState *s = bs->opaque; 633 int i, nb_clusters, refcount; 634 635 nb_clusters = size_to_clusters(s, size); 636 retry: 637 for(i = 0; i < nb_clusters; i++) { 638 int64_t next_cluster_index = s->free_cluster_index++; 639 refcount = get_refcount(bs, next_cluster_index); 640 641 if (refcount < 0) { 642 return refcount; 643 } else if (refcount != 0) { 644 goto retry; 645 } 646 } 647 #ifdef DEBUG_ALLOC2 648 fprintf(stderr, "alloc_clusters: size=%" PRId64 " -> %" PRId64 "\n", 649 size, 650 (s->free_cluster_index - nb_clusters) << s->cluster_bits); 651 #endif 652 return (s->free_cluster_index - nb_clusters) << s->cluster_bits; 653 } 654 655 int64_t qcow2_alloc_clusters(BlockDriverState *bs, int64_t size) 656 { 657 int64_t offset; 658 int ret; 659 660 BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC); 661 offset = alloc_clusters_noref(bs, size); 662 if (offset < 0) { 663 return offset; 664 } 665 666 ret = update_refcount(bs, offset, size, 1, QCOW2_DISCARD_NEVER); 667 if (ret < 0) { 668 return ret; 669 } 670 671 return offset; 672 } 673 674 int qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset, 675 int nb_clusters) 676 { 677 BDRVQcowState *s = bs->opaque; 678 uint64_t cluster_index; 679 uint64_t old_free_cluster_index; 680 uint64_t i; 681 int refcount, ret; 682 683 assert(nb_clusters >= 0); 684 if (nb_clusters == 0) { 685 return 0; 686 } 687 688 /* Check how many clusters there are free */ 689 cluster_index = offset >> s->cluster_bits; 690 for(i = 0; i < nb_clusters; i++) { 691 refcount = get_refcount(bs, cluster_index++); 692 693 if (refcount < 0) { 694 return refcount; 695 } else if (refcount != 0) { 696 break; 697 } 698 } 699 700 /* And then allocate them */ 701 old_free_cluster_index = s->free_cluster_index; 702 s->free_cluster_index = cluster_index + i; 703 704 ret = update_refcount(bs, offset, i << s->cluster_bits, 1, 705 QCOW2_DISCARD_NEVER); 706 if (ret < 0) { 707 return ret; 708 } 709 710 s->free_cluster_index = old_free_cluster_index; 711 712 return i; 713 } 714 715 /* only used to allocate compressed sectors. We try to allocate 716 contiguous sectors. size must be <= cluster_size */ 717 int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size) 718 { 719 BDRVQcowState *s = bs->opaque; 720 int64_t offset, cluster_offset; 721 int free_in_cluster; 722 723 BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_BYTES); 724 assert(size > 0 && size <= s->cluster_size); 725 if (s->free_byte_offset == 0) { 726 offset = qcow2_alloc_clusters(bs, s->cluster_size); 727 if (offset < 0) { 728 return offset; 729 } 730 s->free_byte_offset = offset; 731 } 732 redo: 733 free_in_cluster = s->cluster_size - 734 offset_into_cluster(s, s->free_byte_offset); 735 if (size <= free_in_cluster) { 736 /* enough space in current cluster */ 737 offset = s->free_byte_offset; 738 s->free_byte_offset += size; 739 free_in_cluster -= size; 740 if (free_in_cluster == 0) 741 s->free_byte_offset = 0; 742 if (offset_into_cluster(s, offset) != 0) 743 qcow2_update_cluster_refcount(bs, offset >> s->cluster_bits, 1, 744 QCOW2_DISCARD_NEVER); 745 } else { 746 offset = qcow2_alloc_clusters(bs, s->cluster_size); 747 if (offset < 0) { 748 return offset; 749 } 750 cluster_offset = start_of_cluster(s, s->free_byte_offset); 751 if ((cluster_offset + s->cluster_size) == offset) { 752 /* we are lucky: contiguous data */ 753 offset = s->free_byte_offset; 754 qcow2_update_cluster_refcount(bs, offset >> s->cluster_bits, 1, 755 QCOW2_DISCARD_NEVER); 756 s->free_byte_offset += size; 757 } else { 758 s->free_byte_offset = offset; 759 goto redo; 760 } 761 } 762 763 /* The cluster refcount was incremented, either by qcow2_alloc_clusters() 764 * or explicitly by qcow2_update_cluster_refcount(). Refcount blocks must 765 * be flushed before the caller's L2 table updates. 766 */ 767 qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache); 768 return offset; 769 } 770 771 void qcow2_free_clusters(BlockDriverState *bs, 772 int64_t offset, int64_t size, 773 enum qcow2_discard_type type) 774 { 775 int ret; 776 777 BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_FREE); 778 ret = update_refcount(bs, offset, size, -1, type); 779 if (ret < 0) { 780 fprintf(stderr, "qcow2_free_clusters failed: %s\n", strerror(-ret)); 781 /* TODO Remember the clusters to free them later and avoid leaking */ 782 } 783 } 784 785 /* 786 * Free a cluster using its L2 entry (handles clusters of all types, e.g. 787 * normal cluster, compressed cluster, etc.) 788 */ 789 void qcow2_free_any_clusters(BlockDriverState *bs, uint64_t l2_entry, 790 int nb_clusters, enum qcow2_discard_type type) 791 { 792 BDRVQcowState *s = bs->opaque; 793 794 switch (qcow2_get_cluster_type(l2_entry)) { 795 case QCOW2_CLUSTER_COMPRESSED: 796 { 797 int nb_csectors; 798 nb_csectors = ((l2_entry >> s->csize_shift) & 799 s->csize_mask) + 1; 800 qcow2_free_clusters(bs, 801 (l2_entry & s->cluster_offset_mask) & ~511, 802 nb_csectors * 512, type); 803 } 804 break; 805 case QCOW2_CLUSTER_NORMAL: 806 case QCOW2_CLUSTER_ZERO: 807 if (l2_entry & L2E_OFFSET_MASK) { 808 qcow2_free_clusters(bs, l2_entry & L2E_OFFSET_MASK, 809 nb_clusters << s->cluster_bits, type); 810 } 811 break; 812 case QCOW2_CLUSTER_UNALLOCATED: 813 break; 814 default: 815 abort(); 816 } 817 } 818 819 820 821 /*********************************************************/ 822 /* snapshots and image creation */ 823 824 825 826 /* update the refcounts of snapshots and the copied flag */ 827 int qcow2_update_snapshot_refcount(BlockDriverState *bs, 828 int64_t l1_table_offset, int l1_size, int addend) 829 { 830 BDRVQcowState *s = bs->opaque; 831 uint64_t *l1_table, *l2_table, l2_offset, offset, l1_size2, l1_allocated; 832 int64_t old_offset, old_l2_offset; 833 int i, j, l1_modified = 0, nb_csectors, refcount; 834 int ret; 835 836 l2_table = NULL; 837 l1_table = NULL; 838 l1_size2 = l1_size * sizeof(uint64_t); 839 840 s->cache_discards = true; 841 842 /* WARNING: qcow2_snapshot_goto relies on this function not using the 843 * l1_table_offset when it is the current s->l1_table_offset! Be careful 844 * when changing this! */ 845 if (l1_table_offset != s->l1_table_offset) { 846 l1_table = g_malloc0(align_offset(l1_size2, 512)); 847 l1_allocated = 1; 848 849 ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2); 850 if (ret < 0) { 851 goto fail; 852 } 853 854 for(i = 0;i < l1_size; i++) 855 be64_to_cpus(&l1_table[i]); 856 } else { 857 assert(l1_size == s->l1_size); 858 l1_table = s->l1_table; 859 l1_allocated = 0; 860 } 861 862 for(i = 0; i < l1_size; i++) { 863 l2_offset = l1_table[i]; 864 if (l2_offset) { 865 old_l2_offset = l2_offset; 866 l2_offset &= L1E_OFFSET_MASK; 867 868 ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, 869 (void**) &l2_table); 870 if (ret < 0) { 871 goto fail; 872 } 873 874 for(j = 0; j < s->l2_size; j++) { 875 uint64_t cluster_index; 876 877 offset = be64_to_cpu(l2_table[j]); 878 old_offset = offset; 879 offset &= ~QCOW_OFLAG_COPIED; 880 881 switch (qcow2_get_cluster_type(offset)) { 882 case QCOW2_CLUSTER_COMPRESSED: 883 nb_csectors = ((offset >> s->csize_shift) & 884 s->csize_mask) + 1; 885 if (addend != 0) { 886 ret = update_refcount(bs, 887 (offset & s->cluster_offset_mask) & ~511, 888 nb_csectors * 512, addend, 889 QCOW2_DISCARD_SNAPSHOT); 890 if (ret < 0) { 891 goto fail; 892 } 893 } 894 /* compressed clusters are never modified */ 895 refcount = 2; 896 break; 897 898 case QCOW2_CLUSTER_NORMAL: 899 case QCOW2_CLUSTER_ZERO: 900 cluster_index = (offset & L2E_OFFSET_MASK) >> s->cluster_bits; 901 if (!cluster_index) { 902 /* unallocated */ 903 refcount = 0; 904 break; 905 } 906 if (addend != 0) { 907 refcount = qcow2_update_cluster_refcount(bs, 908 cluster_index, addend, 909 QCOW2_DISCARD_SNAPSHOT); 910 } else { 911 refcount = get_refcount(bs, cluster_index); 912 } 913 914 if (refcount < 0) { 915 ret = refcount; 916 goto fail; 917 } 918 break; 919 920 case QCOW2_CLUSTER_UNALLOCATED: 921 refcount = 0; 922 break; 923 924 default: 925 abort(); 926 } 927 928 if (refcount == 1) { 929 offset |= QCOW_OFLAG_COPIED; 930 } 931 if (offset != old_offset) { 932 if (addend > 0) { 933 qcow2_cache_set_dependency(bs, s->l2_table_cache, 934 s->refcount_block_cache); 935 } 936 l2_table[j] = cpu_to_be64(offset); 937 qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table); 938 } 939 } 940 941 ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); 942 if (ret < 0) { 943 goto fail; 944 } 945 946 947 if (addend != 0) { 948 refcount = qcow2_update_cluster_refcount(bs, l2_offset >> 949 s->cluster_bits, addend, QCOW2_DISCARD_SNAPSHOT); 950 } else { 951 refcount = get_refcount(bs, l2_offset >> s->cluster_bits); 952 } 953 if (refcount < 0) { 954 ret = refcount; 955 goto fail; 956 } else if (refcount == 1) { 957 l2_offset |= QCOW_OFLAG_COPIED; 958 } 959 if (l2_offset != old_l2_offset) { 960 l1_table[i] = l2_offset; 961 l1_modified = 1; 962 } 963 } 964 } 965 966 ret = bdrv_flush(bs); 967 fail: 968 if (l2_table) { 969 qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); 970 } 971 972 s->cache_discards = false; 973 qcow2_process_discards(bs, ret); 974 975 /* Update L1 only if it isn't deleted anyway (addend = -1) */ 976 if (ret == 0 && addend >= 0 && l1_modified) { 977 for (i = 0; i < l1_size; i++) { 978 cpu_to_be64s(&l1_table[i]); 979 } 980 981 ret = bdrv_pwrite_sync(bs->file, l1_table_offset, l1_table, l1_size2); 982 983 for (i = 0; i < l1_size; i++) { 984 be64_to_cpus(&l1_table[i]); 985 } 986 } 987 if (l1_allocated) 988 g_free(l1_table); 989 return ret; 990 } 991 992 993 994 995 /*********************************************************/ 996 /* refcount checking functions */ 997 998 999 1000 /* 1001 * Increases the refcount for a range of clusters in a given refcount table. 1002 * This is used to construct a temporary refcount table out of L1 and L2 tables 1003 * which can be compared the the refcount table saved in the image. 1004 * 1005 * Modifies the number of errors in res. 1006 */ 1007 static void inc_refcounts(BlockDriverState *bs, 1008 BdrvCheckResult *res, 1009 uint16_t *refcount_table, 1010 int refcount_table_size, 1011 int64_t offset, int64_t size) 1012 { 1013 BDRVQcowState *s = bs->opaque; 1014 int64_t start, last, cluster_offset; 1015 int k; 1016 1017 if (size <= 0) 1018 return; 1019 1020 start = start_of_cluster(s, offset); 1021 last = start_of_cluster(s, offset + size - 1); 1022 for(cluster_offset = start; cluster_offset <= last; 1023 cluster_offset += s->cluster_size) { 1024 k = cluster_offset >> s->cluster_bits; 1025 if (k < 0) { 1026 fprintf(stderr, "ERROR: invalid cluster offset=0x%" PRIx64 "\n", 1027 cluster_offset); 1028 res->corruptions++; 1029 } else if (k >= refcount_table_size) { 1030 fprintf(stderr, "Warning: cluster offset=0x%" PRIx64 " is after " 1031 "the end of the image file, can't properly check refcounts.\n", 1032 cluster_offset); 1033 res->check_errors++; 1034 } else { 1035 if (++refcount_table[k] == 0) { 1036 fprintf(stderr, "ERROR: overflow cluster offset=0x%" PRIx64 1037 "\n", cluster_offset); 1038 res->corruptions++; 1039 } 1040 } 1041 } 1042 } 1043 1044 /* Flags for check_refcounts_l1() and check_refcounts_l2() */ 1045 enum { 1046 CHECK_FRAG_INFO = 0x2, /* update BlockFragInfo counters */ 1047 }; 1048 1049 /* 1050 * Increases the refcount in the given refcount table for the all clusters 1051 * referenced in the L2 table. While doing so, performs some checks on L2 1052 * entries. 1053 * 1054 * Returns the number of errors found by the checks or -errno if an internal 1055 * error occurred. 1056 */ 1057 static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res, 1058 uint16_t *refcount_table, int refcount_table_size, int64_t l2_offset, 1059 int flags) 1060 { 1061 BDRVQcowState *s = bs->opaque; 1062 uint64_t *l2_table, l2_entry; 1063 uint64_t next_contiguous_offset = 0; 1064 int i, l2_size, nb_csectors; 1065 1066 /* Read L2 table from disk */ 1067 l2_size = s->l2_size * sizeof(uint64_t); 1068 l2_table = g_malloc(l2_size); 1069 1070 if (bdrv_pread(bs->file, l2_offset, l2_table, l2_size) != l2_size) 1071 goto fail; 1072 1073 /* Do the actual checks */ 1074 for(i = 0; i < s->l2_size; i++) { 1075 l2_entry = be64_to_cpu(l2_table[i]); 1076 1077 switch (qcow2_get_cluster_type(l2_entry)) { 1078 case QCOW2_CLUSTER_COMPRESSED: 1079 /* Compressed clusters don't have QCOW_OFLAG_COPIED */ 1080 if (l2_entry & QCOW_OFLAG_COPIED) { 1081 fprintf(stderr, "ERROR: cluster %" PRId64 ": " 1082 "copied flag must never be set for compressed " 1083 "clusters\n", l2_entry >> s->cluster_bits); 1084 l2_entry &= ~QCOW_OFLAG_COPIED; 1085 res->corruptions++; 1086 } 1087 1088 /* Mark cluster as used */ 1089 nb_csectors = ((l2_entry >> s->csize_shift) & 1090 s->csize_mask) + 1; 1091 l2_entry &= s->cluster_offset_mask; 1092 inc_refcounts(bs, res, refcount_table, refcount_table_size, 1093 l2_entry & ~511, nb_csectors * 512); 1094 1095 if (flags & CHECK_FRAG_INFO) { 1096 res->bfi.allocated_clusters++; 1097 res->bfi.compressed_clusters++; 1098 1099 /* Compressed clusters are fragmented by nature. Since they 1100 * take up sub-sector space but we only have sector granularity 1101 * I/O we need to re-read the same sectors even for adjacent 1102 * compressed clusters. 1103 */ 1104 res->bfi.fragmented_clusters++; 1105 } 1106 break; 1107 1108 case QCOW2_CLUSTER_ZERO: 1109 if ((l2_entry & L2E_OFFSET_MASK) == 0) { 1110 break; 1111 } 1112 /* fall through */ 1113 1114 case QCOW2_CLUSTER_NORMAL: 1115 { 1116 uint64_t offset = l2_entry & L2E_OFFSET_MASK; 1117 1118 if (flags & CHECK_FRAG_INFO) { 1119 res->bfi.allocated_clusters++; 1120 if (next_contiguous_offset && 1121 offset != next_contiguous_offset) { 1122 res->bfi.fragmented_clusters++; 1123 } 1124 next_contiguous_offset = offset + s->cluster_size; 1125 } 1126 1127 /* Mark cluster as used */ 1128 inc_refcounts(bs, res, refcount_table,refcount_table_size, 1129 offset, s->cluster_size); 1130 1131 /* Correct offsets are cluster aligned */ 1132 if (offset_into_cluster(s, offset)) { 1133 fprintf(stderr, "ERROR offset=%" PRIx64 ": Cluster is not " 1134 "properly aligned; L2 entry corrupted.\n", offset); 1135 res->corruptions++; 1136 } 1137 break; 1138 } 1139 1140 case QCOW2_CLUSTER_UNALLOCATED: 1141 break; 1142 1143 default: 1144 abort(); 1145 } 1146 } 1147 1148 g_free(l2_table); 1149 return 0; 1150 1151 fail: 1152 fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n"); 1153 g_free(l2_table); 1154 return -EIO; 1155 } 1156 1157 /* 1158 * Increases the refcount for the L1 table, its L2 tables and all referenced 1159 * clusters in the given refcount table. While doing so, performs some checks 1160 * on L1 and L2 entries. 1161 * 1162 * Returns the number of errors found by the checks or -errno if an internal 1163 * error occurred. 1164 */ 1165 static int check_refcounts_l1(BlockDriverState *bs, 1166 BdrvCheckResult *res, 1167 uint16_t *refcount_table, 1168 int refcount_table_size, 1169 int64_t l1_table_offset, int l1_size, 1170 int flags) 1171 { 1172 BDRVQcowState *s = bs->opaque; 1173 uint64_t *l1_table, l2_offset, l1_size2; 1174 int i, ret; 1175 1176 l1_size2 = l1_size * sizeof(uint64_t); 1177 1178 /* Mark L1 table as used */ 1179 inc_refcounts(bs, res, refcount_table, refcount_table_size, 1180 l1_table_offset, l1_size2); 1181 1182 /* Read L1 table entries from disk */ 1183 if (l1_size2 == 0) { 1184 l1_table = NULL; 1185 } else { 1186 l1_table = g_malloc(l1_size2); 1187 if (bdrv_pread(bs->file, l1_table_offset, 1188 l1_table, l1_size2) != l1_size2) 1189 goto fail; 1190 for(i = 0;i < l1_size; i++) 1191 be64_to_cpus(&l1_table[i]); 1192 } 1193 1194 /* Do the actual checks */ 1195 for(i = 0; i < l1_size; i++) { 1196 l2_offset = l1_table[i]; 1197 if (l2_offset) { 1198 /* Mark L2 table as used */ 1199 l2_offset &= L1E_OFFSET_MASK; 1200 inc_refcounts(bs, res, refcount_table, refcount_table_size, 1201 l2_offset, s->cluster_size); 1202 1203 /* L2 tables are cluster aligned */ 1204 if (offset_into_cluster(s, l2_offset)) { 1205 fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not " 1206 "cluster aligned; L1 entry corrupted\n", l2_offset); 1207 res->corruptions++; 1208 } 1209 1210 /* Process and check L2 entries */ 1211 ret = check_refcounts_l2(bs, res, refcount_table, 1212 refcount_table_size, l2_offset, flags); 1213 if (ret < 0) { 1214 goto fail; 1215 } 1216 } 1217 } 1218 g_free(l1_table); 1219 return 0; 1220 1221 fail: 1222 fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n"); 1223 res->check_errors++; 1224 g_free(l1_table); 1225 return -EIO; 1226 } 1227 1228 /* 1229 * Checks the OFLAG_COPIED flag for all L1 and L2 entries. 1230 * 1231 * This function does not print an error message nor does it increment 1232 * check_errors if get_refcount fails (this is because such an error will have 1233 * been already detected and sufficiently signaled by the calling function 1234 * (qcow2_check_refcounts) by the time this function is called). 1235 */ 1236 static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res, 1237 BdrvCheckMode fix) 1238 { 1239 BDRVQcowState *s = bs->opaque; 1240 uint64_t *l2_table = qemu_blockalign(bs, s->cluster_size); 1241 int ret; 1242 int refcount; 1243 int i, j; 1244 1245 for (i = 0; i < s->l1_size; i++) { 1246 uint64_t l1_entry = s->l1_table[i]; 1247 uint64_t l2_offset = l1_entry & L1E_OFFSET_MASK; 1248 bool l2_dirty = false; 1249 1250 if (!l2_offset) { 1251 continue; 1252 } 1253 1254 refcount = get_refcount(bs, l2_offset >> s->cluster_bits); 1255 if (refcount < 0) { 1256 /* don't print message nor increment check_errors */ 1257 continue; 1258 } 1259 if ((refcount == 1) != ((l1_entry & QCOW_OFLAG_COPIED) != 0)) { 1260 fprintf(stderr, "%s OFLAG_COPIED L2 cluster: l1_index=%d " 1261 "l1_entry=%" PRIx64 " refcount=%d\n", 1262 fix & BDRV_FIX_ERRORS ? "Repairing" : 1263 "ERROR", 1264 i, l1_entry, refcount); 1265 if (fix & BDRV_FIX_ERRORS) { 1266 s->l1_table[i] = refcount == 1 1267 ? l1_entry | QCOW_OFLAG_COPIED 1268 : l1_entry & ~QCOW_OFLAG_COPIED; 1269 ret = qcow2_write_l1_entry(bs, i); 1270 if (ret < 0) { 1271 res->check_errors++; 1272 goto fail; 1273 } 1274 res->corruptions_fixed++; 1275 } else { 1276 res->corruptions++; 1277 } 1278 } 1279 1280 ret = bdrv_pread(bs->file, l2_offset, l2_table, 1281 s->l2_size * sizeof(uint64_t)); 1282 if (ret < 0) { 1283 fprintf(stderr, "ERROR: Could not read L2 table: %s\n", 1284 strerror(-ret)); 1285 res->check_errors++; 1286 goto fail; 1287 } 1288 1289 for (j = 0; j < s->l2_size; j++) { 1290 uint64_t l2_entry = be64_to_cpu(l2_table[j]); 1291 uint64_t data_offset = l2_entry & L2E_OFFSET_MASK; 1292 int cluster_type = qcow2_get_cluster_type(l2_entry); 1293 1294 if ((cluster_type == QCOW2_CLUSTER_NORMAL) || 1295 ((cluster_type == QCOW2_CLUSTER_ZERO) && (data_offset != 0))) { 1296 refcount = get_refcount(bs, data_offset >> s->cluster_bits); 1297 if (refcount < 0) { 1298 /* don't print message nor increment check_errors */ 1299 continue; 1300 } 1301 if ((refcount == 1) != ((l2_entry & QCOW_OFLAG_COPIED) != 0)) { 1302 fprintf(stderr, "%s OFLAG_COPIED data cluster: " 1303 "l2_entry=%" PRIx64 " refcount=%d\n", 1304 fix & BDRV_FIX_ERRORS ? "Repairing" : 1305 "ERROR", 1306 l2_entry, refcount); 1307 if (fix & BDRV_FIX_ERRORS) { 1308 l2_table[j] = cpu_to_be64(refcount == 1 1309 ? l2_entry | QCOW_OFLAG_COPIED 1310 : l2_entry & ~QCOW_OFLAG_COPIED); 1311 l2_dirty = true; 1312 res->corruptions_fixed++; 1313 } else { 1314 res->corruptions++; 1315 } 1316 } 1317 } 1318 } 1319 1320 if (l2_dirty) { 1321 ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L2, 1322 l2_offset, s->cluster_size); 1323 if (ret < 0) { 1324 fprintf(stderr, "ERROR: Could not write L2 table; metadata " 1325 "overlap check failed: %s\n", strerror(-ret)); 1326 res->check_errors++; 1327 goto fail; 1328 } 1329 1330 ret = bdrv_pwrite(bs->file, l2_offset, l2_table, s->cluster_size); 1331 if (ret < 0) { 1332 fprintf(stderr, "ERROR: Could not write L2 table: %s\n", 1333 strerror(-ret)); 1334 res->check_errors++; 1335 goto fail; 1336 } 1337 } 1338 } 1339 1340 ret = 0; 1341 1342 fail: 1343 qemu_vfree(l2_table); 1344 return ret; 1345 } 1346 1347 /* 1348 * Writes one sector of the refcount table to the disk 1349 */ 1350 #define RT_ENTRIES_PER_SECTOR (512 / sizeof(uint64_t)) 1351 static int write_reftable_entry(BlockDriverState *bs, int rt_index) 1352 { 1353 BDRVQcowState *s = bs->opaque; 1354 uint64_t buf[RT_ENTRIES_PER_SECTOR]; 1355 int rt_start_index; 1356 int i, ret; 1357 1358 rt_start_index = rt_index & ~(RT_ENTRIES_PER_SECTOR - 1); 1359 for (i = 0; i < RT_ENTRIES_PER_SECTOR; i++) { 1360 buf[i] = cpu_to_be64(s->refcount_table[rt_start_index + i]); 1361 } 1362 1363 ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_REFCOUNT_TABLE, 1364 s->refcount_table_offset + rt_start_index * sizeof(uint64_t), 1365 sizeof(buf)); 1366 if (ret < 0) { 1367 return ret; 1368 } 1369 1370 BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE); 1371 ret = bdrv_pwrite_sync(bs->file, s->refcount_table_offset + 1372 rt_start_index * sizeof(uint64_t), buf, sizeof(buf)); 1373 if (ret < 0) { 1374 return ret; 1375 } 1376 1377 return 0; 1378 } 1379 1380 /* 1381 * Allocates a new cluster for the given refcount block (represented by its 1382 * offset in the image file) and copies the current content there. This function 1383 * does _not_ decrement the reference count for the currently occupied cluster. 1384 * 1385 * This function prints an informative message to stderr on error (and returns 1386 * -errno); on success, 0 is returned. 1387 */ 1388 static int64_t realloc_refcount_block(BlockDriverState *bs, int reftable_index, 1389 uint64_t offset) 1390 { 1391 BDRVQcowState *s = bs->opaque; 1392 int64_t new_offset = 0; 1393 void *refcount_block = NULL; 1394 int ret; 1395 1396 /* allocate new refcount block */ 1397 new_offset = qcow2_alloc_clusters(bs, s->cluster_size); 1398 if (new_offset < 0) { 1399 fprintf(stderr, "Could not allocate new cluster: %s\n", 1400 strerror(-new_offset)); 1401 ret = new_offset; 1402 goto fail; 1403 } 1404 1405 /* fetch current refcount block content */ 1406 ret = qcow2_cache_get(bs, s->refcount_block_cache, offset, &refcount_block); 1407 if (ret < 0) { 1408 fprintf(stderr, "Could not fetch refcount block: %s\n", strerror(-ret)); 1409 goto fail; 1410 } 1411 1412 /* new block has not yet been entered into refcount table, therefore it is 1413 * no refcount block yet (regarding this check) */ 1414 ret = qcow2_pre_write_overlap_check(bs, 0, new_offset, s->cluster_size); 1415 if (ret < 0) { 1416 fprintf(stderr, "Could not write refcount block; metadata overlap " 1417 "check failed: %s\n", strerror(-ret)); 1418 /* the image will be marked corrupt, so don't even attempt on freeing 1419 * the cluster */ 1420 new_offset = 0; 1421 goto fail; 1422 } 1423 1424 /* write to new block */ 1425 ret = bdrv_write(bs->file, new_offset / BDRV_SECTOR_SIZE, refcount_block, 1426 s->cluster_sectors); 1427 if (ret < 0) { 1428 fprintf(stderr, "Could not write refcount block: %s\n", strerror(-ret)); 1429 goto fail; 1430 } 1431 1432 /* update refcount table */ 1433 assert(!offset_into_cluster(s, new_offset)); 1434 s->refcount_table[reftable_index] = new_offset; 1435 ret = write_reftable_entry(bs, reftable_index); 1436 if (ret < 0) { 1437 fprintf(stderr, "Could not update refcount table: %s\n", 1438 strerror(-ret)); 1439 goto fail; 1440 } 1441 1442 fail: 1443 if (new_offset && (ret < 0)) { 1444 qcow2_free_clusters(bs, new_offset, s->cluster_size, 1445 QCOW2_DISCARD_ALWAYS); 1446 } 1447 if (refcount_block) { 1448 if (ret < 0) { 1449 qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block); 1450 } else { 1451 ret = qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block); 1452 } 1453 } 1454 if (ret < 0) { 1455 return ret; 1456 } 1457 return new_offset; 1458 } 1459 1460 /* 1461 * Checks an image for refcount consistency. 1462 * 1463 * Returns 0 if no errors are found, the number of errors in case the image is 1464 * detected as corrupted, and -errno when an internal error occurred. 1465 */ 1466 int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res, 1467 BdrvCheckMode fix) 1468 { 1469 BDRVQcowState *s = bs->opaque; 1470 int64_t size, i, highest_cluster; 1471 int nb_clusters, refcount1, refcount2; 1472 QCowSnapshot *sn; 1473 uint16_t *refcount_table; 1474 int ret; 1475 1476 size = bdrv_getlength(bs->file); 1477 nb_clusters = size_to_clusters(s, size); 1478 refcount_table = g_malloc0(nb_clusters * sizeof(uint16_t)); 1479 1480 res->bfi.total_clusters = 1481 size_to_clusters(s, bs->total_sectors * BDRV_SECTOR_SIZE); 1482 1483 /* header */ 1484 inc_refcounts(bs, res, refcount_table, nb_clusters, 1485 0, s->cluster_size); 1486 1487 /* current L1 table */ 1488 ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters, 1489 s->l1_table_offset, s->l1_size, CHECK_FRAG_INFO); 1490 if (ret < 0) { 1491 goto fail; 1492 } 1493 1494 /* snapshots */ 1495 for(i = 0; i < s->nb_snapshots; i++) { 1496 sn = s->snapshots + i; 1497 ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters, 1498 sn->l1_table_offset, sn->l1_size, 0); 1499 if (ret < 0) { 1500 goto fail; 1501 } 1502 } 1503 inc_refcounts(bs, res, refcount_table, nb_clusters, 1504 s->snapshots_offset, s->snapshots_size); 1505 1506 /* refcount data */ 1507 inc_refcounts(bs, res, refcount_table, nb_clusters, 1508 s->refcount_table_offset, 1509 s->refcount_table_size * sizeof(uint64_t)); 1510 1511 for(i = 0; i < s->refcount_table_size; i++) { 1512 uint64_t offset, cluster; 1513 offset = s->refcount_table[i]; 1514 cluster = offset >> s->cluster_bits; 1515 1516 /* Refcount blocks are cluster aligned */ 1517 if (offset_into_cluster(s, offset)) { 1518 fprintf(stderr, "ERROR refcount block %" PRId64 " is not " 1519 "cluster aligned; refcount table entry corrupted\n", i); 1520 res->corruptions++; 1521 continue; 1522 } 1523 1524 if (cluster >= nb_clusters) { 1525 fprintf(stderr, "ERROR refcount block %" PRId64 1526 " is outside image\n", i); 1527 res->corruptions++; 1528 continue; 1529 } 1530 1531 if (offset != 0) { 1532 inc_refcounts(bs, res, refcount_table, nb_clusters, 1533 offset, s->cluster_size); 1534 if (refcount_table[cluster] != 1) { 1535 fprintf(stderr, "%s refcount block %" PRId64 1536 " refcount=%d\n", 1537 fix & BDRV_FIX_ERRORS ? "Repairing" : 1538 "ERROR", 1539 i, refcount_table[cluster]); 1540 1541 if (fix & BDRV_FIX_ERRORS) { 1542 int64_t new_offset; 1543 1544 new_offset = realloc_refcount_block(bs, i, offset); 1545 if (new_offset < 0) { 1546 res->corruptions++; 1547 continue; 1548 } 1549 1550 /* update refcounts */ 1551 if ((new_offset >> s->cluster_bits) >= nb_clusters) { 1552 /* increase refcount_table size if necessary */ 1553 int old_nb_clusters = nb_clusters; 1554 nb_clusters = (new_offset >> s->cluster_bits) + 1; 1555 refcount_table = g_realloc(refcount_table, 1556 nb_clusters * sizeof(uint16_t)); 1557 memset(&refcount_table[old_nb_clusters], 0, (nb_clusters 1558 - old_nb_clusters) * sizeof(uint16_t)); 1559 } 1560 refcount_table[cluster]--; 1561 inc_refcounts(bs, res, refcount_table, nb_clusters, 1562 new_offset, s->cluster_size); 1563 1564 res->corruptions_fixed++; 1565 } else { 1566 res->corruptions++; 1567 } 1568 } 1569 } 1570 } 1571 1572 /* compare ref counts */ 1573 for (i = 0, highest_cluster = 0; i < nb_clusters; i++) { 1574 refcount1 = get_refcount(bs, i); 1575 if (refcount1 < 0) { 1576 fprintf(stderr, "Can't get refcount for cluster %" PRId64 ": %s\n", 1577 i, strerror(-refcount1)); 1578 res->check_errors++; 1579 continue; 1580 } 1581 1582 refcount2 = refcount_table[i]; 1583 1584 if (refcount1 > 0 || refcount2 > 0) { 1585 highest_cluster = i; 1586 } 1587 1588 if (refcount1 != refcount2) { 1589 1590 /* Check if we're allowed to fix the mismatch */ 1591 int *num_fixed = NULL; 1592 if (refcount1 > refcount2 && (fix & BDRV_FIX_LEAKS)) { 1593 num_fixed = &res->leaks_fixed; 1594 } else if (refcount1 < refcount2 && (fix & BDRV_FIX_ERRORS)) { 1595 num_fixed = &res->corruptions_fixed; 1596 } 1597 1598 fprintf(stderr, "%s cluster %" PRId64 " refcount=%d reference=%d\n", 1599 num_fixed != NULL ? "Repairing" : 1600 refcount1 < refcount2 ? "ERROR" : 1601 "Leaked", 1602 i, refcount1, refcount2); 1603 1604 if (num_fixed) { 1605 ret = update_refcount(bs, i << s->cluster_bits, 1, 1606 refcount2 - refcount1, 1607 QCOW2_DISCARD_ALWAYS); 1608 if (ret >= 0) { 1609 (*num_fixed)++; 1610 continue; 1611 } 1612 } 1613 1614 /* And if we couldn't, print an error */ 1615 if (refcount1 < refcount2) { 1616 res->corruptions++; 1617 } else { 1618 res->leaks++; 1619 } 1620 } 1621 } 1622 1623 /* check OFLAG_COPIED */ 1624 ret = check_oflag_copied(bs, res, fix); 1625 if (ret < 0) { 1626 goto fail; 1627 } 1628 1629 res->image_end_offset = (highest_cluster + 1) * s->cluster_size; 1630 ret = 0; 1631 1632 fail: 1633 g_free(refcount_table); 1634 1635 return ret; 1636 } 1637 1638 #define overlaps_with(ofs, sz) \ 1639 ranges_overlap(offset, size, ofs, sz) 1640 1641 /* 1642 * Checks if the given offset into the image file is actually free to use by 1643 * looking for overlaps with important metadata sections (L1/L2 tables etc.), 1644 * i.e. a sanity check without relying on the refcount tables. 1645 * 1646 * The ign parameter specifies what checks not to perform (being a bitmask of 1647 * QCow2MetadataOverlap values), i.e., what sections to ignore. 1648 * 1649 * Returns: 1650 * - 0 if writing to this offset will not affect the mentioned metadata 1651 * - a positive QCow2MetadataOverlap value indicating one overlapping section 1652 * - a negative value (-errno) indicating an error while performing a check, 1653 * e.g. when bdrv_read failed on QCOW2_OL_INACTIVE_L2 1654 */ 1655 int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset, 1656 int64_t size) 1657 { 1658 BDRVQcowState *s = bs->opaque; 1659 int chk = s->overlap_check & ~ign; 1660 int i, j; 1661 1662 if (!size) { 1663 return 0; 1664 } 1665 1666 if (chk & QCOW2_OL_MAIN_HEADER) { 1667 if (offset < s->cluster_size) { 1668 return QCOW2_OL_MAIN_HEADER; 1669 } 1670 } 1671 1672 /* align range to test to cluster boundaries */ 1673 size = align_offset(offset_into_cluster(s, offset) + size, s->cluster_size); 1674 offset = start_of_cluster(s, offset); 1675 1676 if ((chk & QCOW2_OL_ACTIVE_L1) && s->l1_size) { 1677 if (overlaps_with(s->l1_table_offset, s->l1_size * sizeof(uint64_t))) { 1678 return QCOW2_OL_ACTIVE_L1; 1679 } 1680 } 1681 1682 if ((chk & QCOW2_OL_REFCOUNT_TABLE) && s->refcount_table_size) { 1683 if (overlaps_with(s->refcount_table_offset, 1684 s->refcount_table_size * sizeof(uint64_t))) { 1685 return QCOW2_OL_REFCOUNT_TABLE; 1686 } 1687 } 1688 1689 if ((chk & QCOW2_OL_SNAPSHOT_TABLE) && s->snapshots_size) { 1690 if (overlaps_with(s->snapshots_offset, s->snapshots_size)) { 1691 return QCOW2_OL_SNAPSHOT_TABLE; 1692 } 1693 } 1694 1695 if ((chk & QCOW2_OL_INACTIVE_L1) && s->snapshots) { 1696 for (i = 0; i < s->nb_snapshots; i++) { 1697 if (s->snapshots[i].l1_size && 1698 overlaps_with(s->snapshots[i].l1_table_offset, 1699 s->snapshots[i].l1_size * sizeof(uint64_t))) { 1700 return QCOW2_OL_INACTIVE_L1; 1701 } 1702 } 1703 } 1704 1705 if ((chk & QCOW2_OL_ACTIVE_L2) && s->l1_table) { 1706 for (i = 0; i < s->l1_size; i++) { 1707 if ((s->l1_table[i] & L1E_OFFSET_MASK) && 1708 overlaps_with(s->l1_table[i] & L1E_OFFSET_MASK, 1709 s->cluster_size)) { 1710 return QCOW2_OL_ACTIVE_L2; 1711 } 1712 } 1713 } 1714 1715 if ((chk & QCOW2_OL_REFCOUNT_BLOCK) && s->refcount_table) { 1716 for (i = 0; i < s->refcount_table_size; i++) { 1717 if ((s->refcount_table[i] & REFT_OFFSET_MASK) && 1718 overlaps_with(s->refcount_table[i] & REFT_OFFSET_MASK, 1719 s->cluster_size)) { 1720 return QCOW2_OL_REFCOUNT_BLOCK; 1721 } 1722 } 1723 } 1724 1725 if ((chk & QCOW2_OL_INACTIVE_L2) && s->snapshots) { 1726 for (i = 0; i < s->nb_snapshots; i++) { 1727 uint64_t l1_ofs = s->snapshots[i].l1_table_offset; 1728 uint32_t l1_sz = s->snapshots[i].l1_size; 1729 uint64_t l1_sz2 = l1_sz * sizeof(uint64_t); 1730 uint64_t *l1 = g_malloc(l1_sz2); 1731 int ret; 1732 1733 ret = bdrv_pread(bs->file, l1_ofs, l1, l1_sz2); 1734 if (ret < 0) { 1735 g_free(l1); 1736 return ret; 1737 } 1738 1739 for (j = 0; j < l1_sz; j++) { 1740 uint64_t l2_ofs = be64_to_cpu(l1[j]) & L1E_OFFSET_MASK; 1741 if (l2_ofs && overlaps_with(l2_ofs, s->cluster_size)) { 1742 g_free(l1); 1743 return QCOW2_OL_INACTIVE_L2; 1744 } 1745 } 1746 1747 g_free(l1); 1748 } 1749 } 1750 1751 return 0; 1752 } 1753 1754 static const char *metadata_ol_names[] = { 1755 [QCOW2_OL_MAIN_HEADER_BITNR] = "qcow2_header", 1756 [QCOW2_OL_ACTIVE_L1_BITNR] = "active L1 table", 1757 [QCOW2_OL_ACTIVE_L2_BITNR] = "active L2 table", 1758 [QCOW2_OL_REFCOUNT_TABLE_BITNR] = "refcount table", 1759 [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = "refcount block", 1760 [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = "snapshot table", 1761 [QCOW2_OL_INACTIVE_L1_BITNR] = "inactive L1 table", 1762 [QCOW2_OL_INACTIVE_L2_BITNR] = "inactive L2 table", 1763 }; 1764 1765 /* 1766 * First performs a check for metadata overlaps (through 1767 * qcow2_check_metadata_overlap); if that fails with a negative value (error 1768 * while performing a check), that value is returned. If an impending overlap 1769 * is detected, the BDS will be made unusable, the qcow2 file marked corrupt 1770 * and -EIO returned. 1771 * 1772 * Returns 0 if there were neither overlaps nor errors while checking for 1773 * overlaps; or a negative value (-errno) on error. 1774 */ 1775 int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset, 1776 int64_t size) 1777 { 1778 int ret = qcow2_check_metadata_overlap(bs, ign, offset, size); 1779 1780 if (ret < 0) { 1781 return ret; 1782 } else if (ret > 0) { 1783 int metadata_ol_bitnr = ffs(ret) - 1; 1784 char *message; 1785 QObject *data; 1786 1787 assert(metadata_ol_bitnr < QCOW2_OL_MAX_BITNR); 1788 1789 fprintf(stderr, "qcow2: Preventing invalid write on metadata (overlaps " 1790 "with %s); image marked as corrupt.\n", 1791 metadata_ol_names[metadata_ol_bitnr]); 1792 message = g_strdup_printf("Prevented %s overwrite", 1793 metadata_ol_names[metadata_ol_bitnr]); 1794 data = qobject_from_jsonf("{ 'device': %s, 'msg': %s, 'offset': %" 1795 PRId64 ", 'size': %" PRId64 " }", bs->device_name, message, 1796 offset, size); 1797 monitor_protocol_event(QEVENT_BLOCK_IMAGE_CORRUPTED, data); 1798 g_free(message); 1799 qobject_decref(data); 1800 1801 qcow2_mark_corrupt(bs); 1802 bs->drv = NULL; /* make BDS unusable */ 1803 return -EIO; 1804 } 1805 1806 return 0; 1807 } 1808