/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "qemu/range.h"
#include "qemu/bswap.h"

static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size);
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
                    int64_t offset, int64_t length, uint64_t addend,
                    bool decrease, enum qcow2_discard_type type);

static uint64_t get_refcount_ro0(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro1(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro2(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro3(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro4(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro5(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro6(const void *refcount_array, uint64_t index);

static void set_refcount_ro0(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro1(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro2(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro3(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro4(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro5(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro6(void *refcount_array, uint64_t index,
                             uint64_t value);


static Qcow2GetRefcountFunc *const get_refcount_funcs[] = {
    &get_refcount_ro0,
    &get_refcount_ro1,
    &get_refcount_ro2,
    &get_refcount_ro3,
    &get_refcount_ro4,
    &get_refcount_ro5,
    &get_refcount_ro6
};

static Qcow2SetRefcountFunc *const set_refcount_funcs[] = {
    &set_refcount_ro0,
    &set_refcount_ro1,
    &set_refcount_ro2,
    &set_refcount_ro3,
    &set_refcount_ro4,
    &set_refcount_ro5,
    &set_refcount_ro6
};


/*********************************************************/
/* refcount handling */

static void update_max_refcount_table_index(BDRVQcow2State *s)
{
    unsigned i = s->refcount_table_size - 1;
    while (i > 0 && (s->refcount_table[i] & REFT_OFFSET_MASK) == 0) {
        i--;
    }
    /* Set s->max_refcount_table_index to the index of the last used entry */
    s->max_refcount_table_index = i;
}

int qcow2_refcount_init(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int refcount_table_size2, i;
    int ret;

    assert(s->refcount_order >= 0 && s->refcount_order <= 6);

    s->get_refcount = get_refcount_funcs[s->refcount_order];
    s->set_refcount = set_refcount_funcs[s->refcount_order];

    assert(s->refcount_table_size <= INT_MAX / sizeof(uint64_t));
    refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
    s->refcount_table = g_try_malloc(refcount_table_size2);

    if (s->refcount_table_size > 0) {
        if (s->refcount_table == NULL) {
            ret = -ENOMEM;
            goto fail;
        }
        BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_LOAD);
        ret = bdrv_pread(bs->file, s->refcount_table_offset,
                         s->refcount_table, refcount_table_size2);
        if (ret < 0) {
            goto fail;
        }
        for (i = 0; i < s->refcount_table_size; i++) {
            be64_to_cpus(&s->refcount_table[i]);
        }
        update_max_refcount_table_index(s);
    }
    return 0;
 fail:
    return ret;
}

void qcow2_refcount_close(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    g_free(s->refcount_table);
}


static uint64_t get_refcount_ro0(const void *refcount_array, uint64_t index)
{
    return (((const uint8_t *)refcount_array)[index / 8] >> (index % 8)) & 0x1;
}

static void set_refcount_ro0(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 1));
    ((uint8_t *)refcount_array)[index / 8] &= ~(0x1 << (index % 8));
    ((uint8_t *)refcount_array)[index / 8] |= value << (index % 8);
}

static uint64_t get_refcount_ro1(const void *refcount_array, uint64_t index)
{
    return (((const uint8_t *)refcount_array)[index / 4] >> (2 * (index % 4)))
           & 0x3;
}

static void set_refcount_ro1(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 2));
    ((uint8_t *)refcount_array)[index / 4] &= ~(0x3 << (2 * (index % 4)));
    ((uint8_t *)refcount_array)[index / 4] |= value << (2 * (index % 4));
}

static uint64_t get_refcount_ro2(const void *refcount_array, uint64_t index)
{
    return (((const uint8_t *)refcount_array)[index / 2] >> (4 * (index % 2)))
           & 0xf;
}

static void set_refcount_ro2(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 4));
    ((uint8_t *)refcount_array)[index / 2] &= ~(0xf << (4 * (index % 2)));
    ((uint8_t *)refcount_array)[index / 2] |= value << (4 * (index % 2));
}

static uint64_t get_refcount_ro3(const void *refcount_array, uint64_t index)
{
    return ((const uint8_t *)refcount_array)[index];
}

static void set_refcount_ro3(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 8));
    ((uint8_t *)refcount_array)[index] = value;
}

static uint64_t get_refcount_ro4(const void *refcount_array, uint64_t index)
{
    return be16_to_cpu(((const uint16_t *)refcount_array)[index]);
}

static void set_refcount_ro4(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 16));
    ((uint16_t *)refcount_array)[index] = cpu_to_be16(value);
}

static uint64_t get_refcount_ro5(const void *refcount_array, uint64_t index)
{
    return be32_to_cpu(((const uint32_t *)refcount_array)[index]);
}

static void set_refcount_ro5(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 32));
    ((uint32_t *)refcount_array)[index] = cpu_to_be32(value);
}

static uint64_t get_refcount_ro6(const void *refcount_array, uint64_t index)
{
    return be64_to_cpu(((const uint64_t *)refcount_array)[index]);
}

static void set_refcount_ro6(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    ((uint64_t *)refcount_array)[index] = cpu_to_be64(value);
}
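
/*
 * Worked example for the packed accessors above (illustrative, not part of
 * the original code): get/set_refcount_roN handles entries of 2^N bits, so
 * for refcount_order = 1 each byte packs four 2-bit refcounts. Entry index 5
 * then lives in byte 5 / 4 = 1 at bit offset 2 * (5 % 4) = 2, and
 * get_refcount_ro1() evaluates to
 *
 *     (((const uint8_t *)refcount_array)[1] >> 2) & 0x3
 *
 * Entries of one byte and wider (ro3..ro6) are instead stored big-endian,
 * which is why they go through the be*_to_cpu()/cpu_to_be*() conversions.
 */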

static int load_refcount_block(BlockDriverState *bs,
                               int64_t refcount_block_offset,
                               void **refcount_block)
{
    BDRVQcow2State *s = bs->opaque;

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_LOAD);
    return qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset,
                           refcount_block);
}

/*
 * Retrieves the refcount of the cluster given by its index and stores it in
 * *refcount. Returns 0 on success and -errno on failure.
 */
int qcow2_get_refcount(BlockDriverState *bs, int64_t cluster_index,
                       uint64_t *refcount)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t refcount_table_index, block_index;
    int64_t refcount_block_offset;
    int ret;
    void *refcount_block;

    refcount_table_index = cluster_index >> s->refcount_block_bits;
    if (refcount_table_index >= s->refcount_table_size) {
        *refcount = 0;
        return 0;
    }
    refcount_block_offset =
        s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK;
    if (!refcount_block_offset) {
        *refcount = 0;
        return 0;
    }

    if (offset_into_cluster(s, refcount_block_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#" PRIx64
                                " unaligned (reftable index: %#" PRIx64 ")",
                                refcount_block_offset, refcount_table_index);
        return -EIO;
    }

    ret = qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset,
                          &refcount_block);
    if (ret < 0) {
        return ret;
    }

    block_index = cluster_index & (s->refcount_block_size - 1);
    *refcount = s->get_refcount(refcount_block, block_index);

    qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block);

    return 0;
}

/*
 * Rounds the refcount table size up to avoid growing the table for each single
 * refcount block that is allocated.
 */
static unsigned int next_refcount_table_size(BDRVQcow2State *s,
                                             unsigned int min_size)
{
    unsigned int min_clusters = (min_size >> (s->cluster_bits - 3)) + 1;
    unsigned int refcount_table_clusters =
        MAX(1, s->refcount_table_size >> (s->cluster_bits - 3));

    while (min_clusters > refcount_table_clusters) {
        refcount_table_clusters = (refcount_table_clusters * 3 + 1) / 2;
    }

    return refcount_table_clusters << (s->cluster_bits - 3);
}
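
/*
 * Illustrative example (assumed values): with 64 KiB clusters
 * (cluster_bits = 16), one reftable cluster holds 64 KiB / 8 = 8192 entries.
 * If the table currently occupies one cluster and min_size is 20000 entries,
 * min_clusters = (20000 >> 13) + 1 = 3, and the ~1.5x growth sequence
 * 1 -> 2 -> 3 yields a table of 3 clusters, i.e. 24576 entries. Rounding up
 * like this amortizes table rewrites over many refblock allocations.
 */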

/* Checks if two offsets are described by the same refcount block */
static int in_same_refcount_block(BDRVQcow2State *s, uint64_t offset_a,
                                  uint64_t offset_b)
{
    uint64_t block_a = offset_a >> (s->cluster_bits + s->refcount_block_bits);
    uint64_t block_b = offset_b >> (s->cluster_bits + s->refcount_block_bits);

    return (block_a == block_b);
}

/*
 * Loads a refcount block. If it doesn't exist yet, it is allocated first
 * (including growing the refcount table if needed).
 *
 * Returns 0 on success or -errno in error case
 */
static int alloc_refcount_block(BlockDriverState *bs,
                                int64_t cluster_index, void **refcount_block)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int refcount_table_index;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC);

    /* Find the refcount block for the given cluster */
    refcount_table_index = cluster_index >> s->refcount_block_bits;

    if (refcount_table_index < s->refcount_table_size) {

        uint64_t refcount_block_offset =
            s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK;

        /* If it's already there, we're done */
        if (refcount_block_offset) {
            if (offset_into_cluster(s, refcount_block_offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#"
                                        PRIx64 " unaligned (reftable index: "
                                        "%#x)", refcount_block_offset,
                                        refcount_table_index);
                return -EIO;
            }

            return load_refcount_block(bs, refcount_block_offset,
                                       refcount_block);
        }
    }

    /*
     * If we came here, we need to allocate something: at least a cluster for
     * the new refcount block, and possibly a new refcount table if the old
     * one is too small.
     *
     * Note that allocating clusters here needs some special care:
     *
     * - We can't use the normal qcow2_alloc_clusters(), it would try to
     *   increase the refcount and very likely we would end up with an endless
     *   recursion. Instead we must place the refcount blocks in a way that
     *   they describe themselves.
     *
     * - We need to consider that at this point we are inside update_refcount()
     *   and potentially doing an initial refcount increase. This means that
     *   some clusters have already been allocated by the caller, but their
     *   refcount isn't accurate yet. If we allocate clusters for metadata, we
     *   need to return -EAGAIN to signal the caller that it needs to restart
     *   the search for free clusters.
     *
     * - alloc_clusters_noref and qcow2_free_clusters may load a different
     *   refcount block into the cache
     */
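
    /*
     * Illustrative numbers (assuming 64 KiB clusters and 16-bit refcounts):
     * one refcount block holds 65536 / 2 = 32768 entries, so the block that
     * describes clusters 0..32767 covers the first 2 GiB of the image file.
     * If the new refblock is allocated inside the range it itself covers,
     * in_same_refcount_block() below is true and we simply set its own entry
     * to 1; otherwise update_refcount() recurses back into this function,
     * which can happen at most twice before reaching a self-describing block.
     */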

    *refcount_block = NULL;

    /* We write to the refcount table, so we might depend on L2 tables */
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        return ret;
    }

    /* Allocate the refcount block itself and mark it as used */
    int64_t new_block = alloc_clusters_noref(bs, s->cluster_size);
    if (new_block < 0) {
        return new_block;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "qcow2: Allocate refcount block %d for %" PRIx64
            " at %" PRIx64 "\n",
            refcount_table_index, cluster_index << s->cluster_bits, new_block);
#endif

    if (in_same_refcount_block(s, new_block, cluster_index << s->cluster_bits)) {
        /* Zero the new refcount block before updating it */
        ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
                                    refcount_block);
        if (ret < 0) {
            goto fail_block;
        }

        memset(*refcount_block, 0, s->cluster_size);

        /* The block describes itself, need to update the cache */
        int block_index = (new_block >> s->cluster_bits) &
                          (s->refcount_block_size - 1);
        s->set_refcount(*refcount_block, block_index, 1);
    } else {
        /* Described somewhere else. This can recurse at most twice before we
         * arrive at a block that describes itself. */
        ret = update_refcount(bs, new_block, s->cluster_size, 1, false,
                              QCOW2_DISCARD_NEVER);
        if (ret < 0) {
            goto fail_block;
        }

        ret = qcow2_cache_flush(bs, s->refcount_block_cache);
        if (ret < 0) {
            goto fail_block;
        }

        /* Initialize the new refcount block only after updating its refcount,
         * update_refcount uses the refcount cache itself */
        ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
                                    refcount_block);
        if (ret < 0) {
            goto fail_block;
        }

        memset(*refcount_block, 0, s->cluster_size);
    }

    /* Now the new refcount block needs to be written to disk */
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE);
    qcow2_cache_entry_mark_dirty(bs, s->refcount_block_cache, *refcount_block);
    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail_block;
    }

    /* If the refcount table is big enough, just hook the block up there */
    if (refcount_table_index < s->refcount_table_size) {
        uint64_t data64 = cpu_to_be64(new_block);
        BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP);
        ret = bdrv_pwrite_sync(bs->file,
            s->refcount_table_offset + refcount_table_index * sizeof(uint64_t),
            &data64, sizeof(data64));
        if (ret < 0) {
            goto fail_block;
        }

        s->refcount_table[refcount_table_index] = new_block;
        /* If there's a hole in s->refcount_table then it can happen
         * that refcount_table_index < s->max_refcount_table_index */
        s->max_refcount_table_index =
            MAX(s->max_refcount_table_index, refcount_table_index);

        /* The new refcount block may be where the caller intended to put its
         * data, so let it restart the search. */
        return -EAGAIN;
    }

    qcow2_cache_put(bs, s->refcount_block_cache, refcount_block);

    /*
     * If we come here, we need to grow the refcount table. Again, a new
     * refcount table needs some space and we can't simply allocate to avoid
     * endless recursion.
     *
     * Therefore let's grab new refcount blocks at the end of the image, which
     * will describe themselves and the new refcount table. This way we can
     * reference them only in the new table and do the switch to the new
     * refcount table at once without producing an inconsistent state in
     * between.
     */
    BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_GROW);

    /* Calculate the number of refcount blocks needed so far; this will be the
     * basis for calculating the index of the first cluster used for the
     * self-describing refcount structures which we are about to create.
     *
     * Because we reached this point, there cannot be any refcount entries for
     * cluster_index or higher indices yet. However, because new_block has been
     * allocated to describe that cluster (and it will assume this role later
     * on), we cannot use that index; also, new_block may actually have a higher
     * cluster index than cluster_index, so it needs to be taken into account
     * here (and 1 needs to be added to its value because that cluster is used).
     */
    uint64_t blocks_used = DIV_ROUND_UP(MAX(cluster_index + 1,
                                            (new_block >> s->cluster_bits) + 1),
                                        s->refcount_block_size);
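
    /*
     * Illustrative example (assumed values): with 32768 refcounts per block,
     * cluster_index = 100000 and new_block at cluster index 100001,
     * blocks_used = DIV_ROUND_UP(MAX(100001, 100002), 32768) = 4, i.e. four
     * refblocks are needed to describe everything up to and including the
     * cluster that new_block occupies.
     */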

    if (blocks_used > QCOW_MAX_REFTABLE_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

    /* And now we need at least one block more for the new metadata */
    uint64_t table_size = next_refcount_table_size(s, blocks_used + 1);
    uint64_t last_table_size;
    uint64_t blocks_clusters;
    do {
        uint64_t table_clusters =
            size_to_clusters(s, table_size * sizeof(uint64_t));
        blocks_clusters = 1 +
            DIV_ROUND_UP(table_clusters, s->refcount_block_size);
        uint64_t meta_clusters = table_clusters + blocks_clusters;

        last_table_size = table_size;
        table_size = next_refcount_table_size(s, blocks_used +
            DIV_ROUND_UP(meta_clusters, s->refcount_block_size));

    } while (last_table_size != table_size);
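
    /*
     * The loop above is a fixpoint iteration (descriptive note, not original
     * code): growing the table may require extra refblocks to describe the
     * new metadata, which in turn may require a bigger table, so table_size
     * is recomputed until it stops changing; typically this converges after
     * a single extra pass.
     */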

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "qcow2: Grow refcount table %" PRId32 " => %" PRId64 "\n",
            s->refcount_table_size, table_size);
#endif

    /* Create the new refcount table and blocks */
    uint64_t meta_offset = (blocks_used * s->refcount_block_size) *
        s->cluster_size;
    uint64_t table_offset = meta_offset + blocks_clusters * s->cluster_size;
    uint64_t *new_table = g_try_new0(uint64_t, table_size);
    void *new_blocks = g_try_malloc0(blocks_clusters * s->cluster_size);

    assert(table_size > 0 && blocks_clusters > 0);
    if (new_table == NULL || new_blocks == NULL) {
        ret = -ENOMEM;
        goto fail_table;
    }

    /* Fill the new refcount table */
    memcpy(new_table, s->refcount_table,
           s->refcount_table_size * sizeof(uint64_t));
    new_table[refcount_table_index] = new_block;

    int i;
    for (i = 0; i < blocks_clusters; i++) {
        new_table[blocks_used + i] = meta_offset + (i * s->cluster_size);
    }

    /* Fill the refcount blocks */
    uint64_t table_clusters = size_to_clusters(s, table_size * sizeof(uint64_t));
    int block = 0;
    for (i = 0; i < table_clusters + blocks_clusters; i++) {
        s->set_refcount(new_blocks, block++, 1);
    }

    /* Write refcount blocks to disk */
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS);
    ret = bdrv_pwrite_sync(bs->file, meta_offset, new_blocks,
                           blocks_clusters * s->cluster_size);
    g_free(new_blocks);
    new_blocks = NULL;
    if (ret < 0) {
        goto fail_table;
    }

    /* Write refcount table to disk */
    for (i = 0; i < table_size; i++) {
        cpu_to_be64s(&new_table[i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE);
    ret = bdrv_pwrite_sync(bs->file, table_offset, new_table,
                           table_size * sizeof(uint64_t));
    if (ret < 0) {
        goto fail_table;
    }

    for (i = 0; i < table_size; i++) {
        be64_to_cpus(&new_table[i]);
    }

    /* Hook up the new refcount table in the qcow2 header */
    struct QEMU_PACKED {
        uint64_t d64;
        uint32_t d32;
    } data;
    data.d64 = cpu_to_be64(table_offset);
    data.d32 = cpu_to_be32(table_clusters);
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE);
    ret = bdrv_pwrite_sync(bs->file,
                           offsetof(QCowHeader, refcount_table_offset),
                           &data, sizeof(data));
    if (ret < 0) {
        goto fail_table;
    }

    /* And switch it in memory */
    uint64_t old_table_offset = s->refcount_table_offset;
    uint64_t old_table_size = s->refcount_table_size;

    g_free(s->refcount_table);
    s->refcount_table = new_table;
    s->refcount_table_size = table_size;
    s->refcount_table_offset = table_offset;
    update_max_refcount_table_index(s);

    /* Free old table. */
    qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);

    ret = load_refcount_block(bs, new_block, refcount_block);
    if (ret < 0) {
        return ret;
    }

    /* If we were trying to do the initial refcount update for some cluster
     * allocation, we might have used the same clusters to store newly
     * allocated metadata. Make the caller search some new space. */
    return -EAGAIN;

fail_table:
    g_free(new_blocks);
    g_free(new_table);
fail_block:
    if (*refcount_block != NULL) {
        qcow2_cache_put(bs, s->refcount_block_cache, refcount_block);
    }
    return ret;
}

void qcow2_process_discards(BlockDriverState *bs, int ret)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2DiscardRegion *d, *next;

    QTAILQ_FOREACH_SAFE(d, &s->discards, next, next) {
        QTAILQ_REMOVE(&s->discards, d, next);

        /* Discard is optional, ignore the return value */
        if (ret >= 0) {
            bdrv_pdiscard(bs->file->bs, d->offset, d->bytes);
        }

        g_free(d);
    }
}

static void update_refcount_discard(BlockDriverState *bs,
                                    uint64_t offset, uint64_t length)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2DiscardRegion *d, *p, *next;

    QTAILQ_FOREACH(d, &s->discards, next) {
        uint64_t new_start = MIN(offset, d->offset);
        uint64_t new_end = MAX(offset + length, d->offset + d->bytes);

        if (new_end - new_start <= length + d->bytes) {
            /* There can't be any overlap, areas ending up here have no
             * references any more and therefore shouldn't get freed another
             * time. */
            assert(d->bytes + length == new_end - new_start);
            d->offset = new_start;
            d->bytes = new_end - new_start;
            goto found;
        }
    }

    d = g_malloc(sizeof(*d));
    *d = (Qcow2DiscardRegion) {
        .bs = bs,
        .offset = offset,
        .bytes = length,
    };
    QTAILQ_INSERT_TAIL(&s->discards, d, next);

found:
    /* Merge discard requests if they are adjacent now */
    QTAILQ_FOREACH_SAFE(p, &s->discards, next, next) {
        if (p == d
            || p->offset > d->offset + d->bytes
            || d->offset > p->offset + p->bytes)
        {
            continue;
        }

        /* Still no overlap possible */
        assert(p->offset == d->offset + d->bytes
               || d->offset == p->offset + p->bytes);

        QTAILQ_REMOVE(&s->discards, p, next);
        d->offset = MIN(d->offset, p->offset);
        d->bytes += p->bytes;
        g_free(p);
    }
}
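
/*
 * Illustrative example for the merging above (assumed offsets): if the list
 * already holds the region [64 KiB, 128 KiB) and clusters covering
 * [128 KiB, 192 KiB) are freed next, new_end - new_start == length + d->bytes
 * holds, so the existing entry is widened to [64 KiB, 192 KiB) instead of
 * queueing a second request; the QTAILQ_FOREACH_SAFE pass then folds in any
 * other entry that has become adjacent. Fewer, larger regions mean fewer
 * bdrv_pdiscard() calls in qcow2_process_discards().
 */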
"-" : "", 710 addend); 711 #endif 712 if (length < 0) { 713 return -EINVAL; 714 } else if (length == 0) { 715 return 0; 716 } 717 718 if (decrease) { 719 qcow2_cache_set_dependency(bs, s->refcount_block_cache, 720 s->l2_table_cache); 721 } 722 723 start = start_of_cluster(s, offset); 724 last = start_of_cluster(s, offset + length - 1); 725 for(cluster_offset = start; cluster_offset <= last; 726 cluster_offset += s->cluster_size) 727 { 728 int block_index; 729 uint64_t refcount; 730 int64_t cluster_index = cluster_offset >> s->cluster_bits; 731 int64_t table_index = cluster_index >> s->refcount_block_bits; 732 733 /* Load the refcount block and allocate it if needed */ 734 if (table_index != old_table_index) { 735 if (refcount_block) { 736 qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block); 737 } 738 ret = alloc_refcount_block(bs, cluster_index, &refcount_block); 739 if (ret < 0) { 740 goto fail; 741 } 742 } 743 old_table_index = table_index; 744 745 qcow2_cache_entry_mark_dirty(bs, s->refcount_block_cache, 746 refcount_block); 747 748 /* we can update the count and save it */ 749 block_index = cluster_index & (s->refcount_block_size - 1); 750 751 refcount = s->get_refcount(refcount_block, block_index); 752 if (decrease ? (refcount - addend > refcount) 753 : (refcount + addend < refcount || 754 refcount + addend > s->refcount_max)) 755 { 756 ret = -EINVAL; 757 goto fail; 758 } 759 if (decrease) { 760 refcount -= addend; 761 } else { 762 refcount += addend; 763 } 764 if (refcount == 0 && cluster_index < s->free_cluster_index) { 765 s->free_cluster_index = cluster_index; 766 } 767 s->set_refcount(refcount_block, block_index, refcount); 768 769 if (refcount == 0 && s->discard_passthrough[type]) { 770 update_refcount_discard(bs, cluster_offset, s->cluster_size); 771 } 772 } 773 774 ret = 0; 775 fail: 776 if (!s->cache_discards) { 777 qcow2_process_discards(bs, ret); 778 } 779 780 /* Write last changed block to disk */ 781 if (refcount_block) { 782 qcow2_cache_put(bs, s->refcount_block_cache, &refcount_block); 783 } 784 785 /* 786 * Try do undo any updates if an error is returned (This may succeed in 787 * some cases like ENOSPC for allocating a new refcount block) 788 */ 789 if (ret < 0) { 790 int dummy; 791 dummy = update_refcount(bs, offset, cluster_offset - offset, addend, 792 !decrease, QCOW2_DISCARD_NEVER); 793 (void)dummy; 794 } 795 796 return ret; 797 } 798 799 /* 800 * Increases or decreases the refcount of a given cluster. 801 * 802 * @addend is the absolute value of the addend; if @decrease is set, @addend 803 * will be subtracted from the current refcount, otherwise it will be added. 804 * 805 * On success 0 is returned; on failure -errno is returned. 806 */ 807 int qcow2_update_cluster_refcount(BlockDriverState *bs, 808 int64_t cluster_index, 809 uint64_t addend, bool decrease, 810 enum qcow2_discard_type type) 811 { 812 BDRVQcow2State *s = bs->opaque; 813 int ret; 814 815 ret = update_refcount(bs, cluster_index << s->cluster_bits, 1, addend, 816 decrease, type); 817 if (ret < 0) { 818 return ret; 819 } 820 821 return 0; 822 } 823 824 825 826 /*********************************************************/ 827 /* cluster allocation functions */ 828 829 830 831 /* return < 0 if error */ 832 static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size) 833 { 834 BDRVQcow2State *s = bs->opaque; 835 uint64_t i, nb_clusters, refcount; 836 int ret; 837 838 /* We can't allocate clusters if they may still be queued for discard. 

/*
 * Increases or decreases the refcount of a given cluster.
 *
 * @addend is the absolute value of the addend; if @decrease is set, @addend
 * will be subtracted from the current refcount, otherwise it will be added.
 *
 * On success 0 is returned; on failure -errno is returned.
 */
int qcow2_update_cluster_refcount(BlockDriverState *bs,
                                  int64_t cluster_index,
                                  uint64_t addend, bool decrease,
                                  enum qcow2_discard_type type)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    ret = update_refcount(bs, cluster_index << s->cluster_bits, 1, addend,
                          decrease, type);
    if (ret < 0) {
        return ret;
    }

    return 0;
}



/*********************************************************/
/* cluster allocation functions */



/* return < 0 if error */
static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t i, nb_clusters, refcount;
    int ret;

    /* We can't allocate clusters if they may still be queued for discard. */
    if (s->cache_discards) {
        qcow2_process_discards(bs, 0);
    }

    nb_clusters = size_to_clusters(s, size);
retry:
    for (i = 0; i < nb_clusters; i++) {
        uint64_t next_cluster_index = s->free_cluster_index++;
        ret = qcow2_get_refcount(bs, next_cluster_index, &refcount);

        if (ret < 0) {
            return ret;
        } else if (refcount != 0) {
            goto retry;
        }
    }

    /* Make sure that all offsets in the "allocated" range are representable
     * in an int64_t */
    if (s->free_cluster_index > 0 &&
        s->free_cluster_index - 1 > (INT64_MAX >> s->cluster_bits))
    {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "alloc_clusters: size=%" PRId64 " -> %" PRId64 "\n",
            size,
            (s->free_cluster_index - nb_clusters) << s->cluster_bits);
#endif
    return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
}
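
/*
 * Illustrative walk-through of the retry loop above (assumed state): with
 * free_cluster_index = 10 and a request for two clusters, cluster 10 is
 * found free but cluster 11 is in use, so the scan jumps back to "retry"
 * with free_cluster_index now at 12. The loop therefore always returns the
 * first run of nb_clusters consecutive free clusters at or after the index
 * it started from, without touching any refcounts itself.
 */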

int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size)
{
    int64_t offset;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC);
    do {
        offset = alloc_clusters_noref(bs, size);
        if (offset < 0) {
            return offset;
        }

        ret = update_refcount(bs, offset, size, 1, false, QCOW2_DISCARD_NEVER);
    } while (ret == -EAGAIN);

    if (ret < 0) {
        return ret;
    }

    return offset;
}

int64_t qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
                                int64_t nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t cluster_index, refcount;
    uint64_t i;
    int ret;

    assert(nb_clusters >= 0);
    if (nb_clusters == 0) {
        return 0;
    }

    do {
        /* Check how many clusters there are free */
        cluster_index = offset >> s->cluster_bits;
        for (i = 0; i < nb_clusters; i++) {
            ret = qcow2_get_refcount(bs, cluster_index++, &refcount);
            if (ret < 0) {
                return ret;
            } else if (refcount != 0) {
                break;
            }
        }

        /* And then allocate them */
        ret = update_refcount(bs, offset, i << s->cluster_bits, 1, false,
                              QCOW2_DISCARD_NEVER);
    } while (ret == -EAGAIN);

    if (ret < 0) {
        return ret;
    }

    return i;
}

/* only used to allocate compressed sectors. We try to allocate
   contiguous sectors. size must be <= cluster_size */
int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t offset;
    size_t free_in_cluster;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_BYTES);
    assert(size > 0 && size <= s->cluster_size);
    assert(!s->free_byte_offset || offset_into_cluster(s, s->free_byte_offset));

    offset = s->free_byte_offset;

    if (offset) {
        uint64_t refcount;
        ret = qcow2_get_refcount(bs, offset >> s->cluster_bits, &refcount);
        if (ret < 0) {
            return ret;
        }

        if (refcount == s->refcount_max) {
            offset = 0;
        }
    }

    free_in_cluster = s->cluster_size - offset_into_cluster(s, offset);
    do {
        if (!offset || free_in_cluster < size) {
            int64_t new_cluster = alloc_clusters_noref(bs, s->cluster_size);
            if (new_cluster < 0) {
                return new_cluster;
            }

            if (!offset || ROUND_UP(offset, s->cluster_size) != new_cluster) {
                offset = new_cluster;
                free_in_cluster = s->cluster_size;
            } else {
                free_in_cluster += s->cluster_size;
            }
        }

        assert(offset);
        ret = update_refcount(bs, offset, size, 1, false, QCOW2_DISCARD_NEVER);
        if (ret < 0) {
            offset = 0;
        }
    } while (ret == -EAGAIN);
    if (ret < 0) {
        return ret;
    }

    /* The cluster refcount was incremented; refcount blocks must be flushed
     * before the caller's L2 table updates. */
    qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache);

    s->free_byte_offset = offset + size;
    if (!offset_into_cluster(s, s->free_byte_offset)) {
        s->free_byte_offset = 0;
    }

    return offset;
}
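
/*
 * Illustrative example (assumed values): with 64 KiB clusters, a previous
 * call may have left free_byte_offset = 0x30200. A request for 1024 bytes
 * fits into the remaining space of that cluster, so the cluster's refcount
 * is bumped once more via update_refcount() and free_byte_offset advances
 * to 0x30600; compressed clusters thus share host clusters, with the
 * refcount tracking how many users each host cluster has.
 */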

void qcow2_free_clusters(BlockDriverState *bs,
                         int64_t offset, int64_t size,
                         enum qcow2_discard_type type)
{
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_FREE);
    ret = update_refcount(bs, offset, size, 1, true, type);
    if (ret < 0) {
        fprintf(stderr, "qcow2_free_clusters failed: %s\n", strerror(-ret));
        /* TODO Remember the clusters to free them later and avoid leaking */
    }
}

/*
 * Free a cluster using its L2 entry (handles clusters of all types, e.g.
 * normal cluster, compressed cluster, etc.)
 */
void qcow2_free_any_clusters(BlockDriverState *bs, uint64_t l2_entry,
                             int nb_clusters, enum qcow2_discard_type type)
{
    BDRVQcow2State *s = bs->opaque;

    switch (qcow2_get_cluster_type(l2_entry)) {
    case QCOW2_CLUSTER_COMPRESSED:
        {
            int nb_csectors;
            nb_csectors = ((l2_entry >> s->csize_shift) &
                           s->csize_mask) + 1;
            qcow2_free_clusters(bs,
                (l2_entry & s->cluster_offset_mask) & ~511,
                nb_csectors * 512, type);
        }
        break;
    case QCOW2_CLUSTER_NORMAL:
    case QCOW2_CLUSTER_ZERO_ALLOC:
        if (offset_into_cluster(s, l2_entry & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, false, -1, -1,
                                    "Cannot free unaligned cluster %#llx",
                                    l2_entry & L2E_OFFSET_MASK);
        } else {
            qcow2_free_clusters(bs, l2_entry & L2E_OFFSET_MASK,
                                nb_clusters << s->cluster_bits, type);
        }
        break;
    case QCOW2_CLUSTER_ZERO_PLAIN:
    case QCOW2_CLUSTER_UNALLOCATED:
        break;
    default:
        abort();
    }
}
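
/*
 * Illustrative decoding of a compressed L2 entry as used above (assuming the
 * default 64 KiB clusters, i.e. csize_shift = 54 and csize_mask = 0xff):
 * the host offset is (l2_entry & s->cluster_offset_mask) & ~511, aligned to
 * 512 bytes, and the compressed data occupies
 * ((l2_entry >> 54) & 0xff) + 1 sectors of 512 bytes, so at most 128 KiB.
 */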



/*********************************************************/
/* snapshots and image creation */



/* update the refcounts of snapshots and the copied flag */
int qcow2_update_snapshot_refcount(BlockDriverState *bs,
    int64_t l1_table_offset, int l1_size, int addend)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table, *l2_table, l2_offset, entry, l1_size2, refcount;
    bool l1_allocated = false;
    int64_t old_entry, old_l2_offset;
    int i, j, l1_modified = 0, nb_csectors;
    int ret;

    assert(addend >= -1 && addend <= 1);

    l2_table = NULL;
    l1_table = NULL;
    l1_size2 = l1_size * sizeof(uint64_t);

    s->cache_discards = true;

    /* WARNING: qcow2_snapshot_goto relies on this function not using the
     * l1_table_offset when it is the current s->l1_table_offset! Be careful
     * when changing this! */
    if (l1_table_offset != s->l1_table_offset) {
        l1_table = g_try_malloc0(align_offset(l1_size2, 512));
        if (l1_size2 && l1_table == NULL) {
            ret = -ENOMEM;
            goto fail;
        }
        l1_allocated = true;

        ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
        if (ret < 0) {
            goto fail;
        }

        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
        }
    } else {
        assert(l1_size == s->l1_size);
        l1_table = s->l1_table;
        l1_allocated = false;
    }

    for (i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            old_l2_offset = l2_offset;
            l2_offset &= L1E_OFFSET_MASK;

            if (offset_into_cluster(s, l2_offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                        PRIx64 " unaligned (L1 index: %#x)",
                                        l2_offset, i);
                ret = -EIO;
                goto fail;
            }

            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                (void **) &l2_table);
            if (ret < 0) {
                goto fail;
            }

            for (j = 0; j < s->l2_size; j++) {
                uint64_t cluster_index;
                uint64_t offset;

                entry = be64_to_cpu(l2_table[j]);
                old_entry = entry;
                entry &= ~QCOW_OFLAG_COPIED;
                offset = entry & L2E_OFFSET_MASK;

                switch (qcow2_get_cluster_type(entry)) {
                case QCOW2_CLUSTER_COMPRESSED:
                    nb_csectors = ((entry >> s->csize_shift) &
                                   s->csize_mask) + 1;
                    if (addend != 0) {
                        ret = update_refcount(bs,
                                (entry & s->cluster_offset_mask) & ~511,
                                nb_csectors * 512, abs(addend), addend < 0,
                                QCOW2_DISCARD_SNAPSHOT);
                        if (ret < 0) {
                            goto fail;
                        }
                    }
                    /* compressed clusters are never modified */
                    refcount = 2;
                    break;

                case QCOW2_CLUSTER_NORMAL:
                case QCOW2_CLUSTER_ZERO_ALLOC:
                    if (offset_into_cluster(s, offset)) {
                        qcow2_signal_corruption(bs, true, -1, -1, "Cluster "
                                                "allocation offset %#" PRIx64
                                                " unaligned (L2 offset: %#"
                                                PRIx64 ", L2 index: %#x)",
                                                offset, l2_offset, j);
                        ret = -EIO;
                        goto fail;
                    }

                    cluster_index = offset >> s->cluster_bits;
                    assert(cluster_index);
                    if (addend != 0) {
                        ret = qcow2_update_cluster_refcount(bs,
                                    cluster_index, abs(addend), addend < 0,
                                    QCOW2_DISCARD_SNAPSHOT);
                        if (ret < 0) {
                            goto fail;
                        }
                    }

                    ret = qcow2_get_refcount(bs, cluster_index, &refcount);
                    if (ret < 0) {
                        goto fail;
                    }
                    break;

                case QCOW2_CLUSTER_ZERO_PLAIN:
                case QCOW2_CLUSTER_UNALLOCATED:
                    refcount = 0;
                    break;

                default:
                    abort();
                }

                if (refcount == 1) {
                    entry |= QCOW_OFLAG_COPIED;
                }
                if (entry != old_entry) {
                    if (addend > 0) {
                        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                                   s->refcount_block_cache);
                    }
                    l2_table[j] = cpu_to_be64(entry);
                    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache,
                                                 l2_table);
                }
            }

            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

            if (addend != 0) {
                ret = qcow2_update_cluster_refcount(bs, l2_offset >>
                                                        s->cluster_bits,
                                                    abs(addend), addend < 0,
                                                    QCOW2_DISCARD_SNAPSHOT);
                if (ret < 0) {
                    goto fail;
                }
            }
            ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                     &refcount);
            if (ret < 0) {
                goto fail;
            } else if (refcount == 1) {
                l2_offset |= QCOW_OFLAG_COPIED;
            }
            if (l2_offset != old_l2_offset) {
                l1_table[i] = l2_offset;
                l1_modified = 1;
            }
        }
    }

    ret = bdrv_flush(bs);
fail:
    if (l2_table) {
        qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
    }

    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    /* Update L1 only if it isn't deleted anyway (addend = -1) */
    if (ret == 0 && addend >= 0 && l1_modified) {
        for (i = 0; i < l1_size; i++) {
            cpu_to_be64s(&l1_table[i]);
        }

        ret = bdrv_pwrite_sync(bs->file, l1_table_offset,
                               l1_table, l1_size2);

        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
        }
    }
    if (l1_allocated) {
        g_free(l1_table);
    }
    return ret;
}



/*********************************************************/
/* refcount checking functions */


static uint64_t refcount_array_byte_size(BDRVQcow2State *s, uint64_t entries)
{
    /* This assertion holds because there is no way we can address more than
     * 2^(64 - 9) clusters at once (with cluster size 512 = 2^9, and because
     * offsets have to be representable in bytes); due to every cluster
     * corresponding to one refcount entry, we are well below that limit */
    assert(entries < (UINT64_C(1) << (64 - 9)));

    /* Thanks to the assertion this will not overflow, because
     * s->refcount_order < 7.
     * (note: x << s->refcount_order == x * s->refcount_bits) */
    return DIV_ROUND_UP(entries << s->refcount_order, 8);
}
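
/*
 * Worked example (illustrative): for 1000 entries with refcount_order = 4
 * (16-bit refcounts), 1000 << 4 = 16000 bits, so the array needs
 * DIV_ROUND_UP(16000, 8) = 2000 bytes; with refcount_order = 0 the same
 * 1000 entries fit in DIV_ROUND_UP(1000, 8) = 125 bytes.
 */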

/**
 * Reallocates *array so that it can hold new_size entries. *size must contain
 * the current number of entries in *array. If the reallocation fails, *array
 * and *size will not be modified and -errno will be returned. If the
 * reallocation is successful, *array will be set to the new buffer, *size
 * will be set to new_size and 0 will be returned. The size of the reallocated
 * refcount array buffer will be aligned to a cluster boundary, and the newly
 * allocated area will be zeroed.
 */
static int realloc_refcount_array(BDRVQcow2State *s, void **array,
                                  int64_t *size, int64_t new_size)
{
    int64_t old_byte_size, new_byte_size;
    void *new_ptr;

    /* Round to clusters so the array can be directly written to disk */
    old_byte_size = size_to_clusters(s, refcount_array_byte_size(s, *size))
                    * s->cluster_size;
    new_byte_size = size_to_clusters(s, refcount_array_byte_size(s, new_size))
                    * s->cluster_size;

    if (new_byte_size == old_byte_size) {
        *size = new_size;
        return 0;
    }

    assert(new_byte_size > 0);

    if (new_byte_size > SIZE_MAX) {
        return -ENOMEM;
    }

    new_ptr = g_try_realloc(*array, new_byte_size);
    if (!new_ptr) {
        return -ENOMEM;
    }

    if (new_byte_size > old_byte_size) {
        memset((char *)new_ptr + old_byte_size, 0,
               new_byte_size - old_byte_size);
    }

    *array = new_ptr;
    *size = new_size;

    return 0;
}

/*
 * Increases the refcount for a range of clusters in a given refcount table.
 * This is used to construct a temporary refcount table out of L1 and L2 tables
 * which can be compared to the refcount table saved in the image.
 *
 * Modifies the number of errors in res.
 */
static int inc_refcounts(BlockDriverState *bs,
                         BdrvCheckResult *res,
                         void **refcount_table,
                         int64_t *refcount_table_size,
                         int64_t offset, int64_t size)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, last, cluster_offset, k, refcount;
    int ret;

    if (size <= 0) {
        return 0;
    }

    start = start_of_cluster(s, offset);
    last = start_of_cluster(s, offset + size - 1);
    for (cluster_offset = start; cluster_offset <= last;
         cluster_offset += s->cluster_size) {
        k = cluster_offset >> s->cluster_bits;
        if (k >= *refcount_table_size) {
            ret = realloc_refcount_array(s, refcount_table,
                                         refcount_table_size, k + 1);
            if (ret < 0) {
                res->check_errors++;
                return ret;
            }
        }

        refcount = s->get_refcount(*refcount_table, k);
        if (refcount == s->refcount_max) {
            fprintf(stderr, "ERROR: overflow cluster offset=0x%" PRIx64
                    "\n", cluster_offset);
            fprintf(stderr, "Use qemu-img amend to increase the refcount entry "
                    "width or qemu-img convert to create a clean copy if the "
                    "image cannot be opened for writing\n");
            res->corruptions++;
            continue;
        }
        s->set_refcount(*refcount_table, k, refcount + 1);
    }

    return 0;
}

/* Flags for check_refcounts_l1() and check_refcounts_l2() */
enum {
    CHECK_FRAG_INFO = 0x2,      /* update BlockFragInfo counters */
};

/*
 * Increases the refcount in the given refcount table for all clusters
 * referenced in the L2 table. While doing so, performs some checks on L2
 * entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
                              void **refcount_table,
                              int64_t *refcount_table_size, int64_t l2_offset,
                              int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table, l2_entry;
    uint64_t next_contiguous_offset = 0;
    int i, l2_size, nb_csectors, ret;

    /* Read L2 table from disk */
    l2_size = s->l2_size * sizeof(uint64_t);
    l2_table = g_malloc(l2_size);

    ret = bdrv_pread(bs->file, l2_offset, l2_table, l2_size);
    if (ret < 0) {
        fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
        res->check_errors++;
        goto fail;
    }

    /* Do the actual checks */
    for (i = 0; i < s->l2_size; i++) {
        l2_entry = be64_to_cpu(l2_table[i]);

        switch (qcow2_get_cluster_type(l2_entry)) {
        case QCOW2_CLUSTER_COMPRESSED:
            /* Compressed clusters don't have QCOW_OFLAG_COPIED */
            if (l2_entry & QCOW_OFLAG_COPIED) {
                fprintf(stderr, "ERROR: cluster %" PRId64 ": "
                        "copied flag must never be set for compressed "
                        "clusters\n", l2_entry >> s->cluster_bits);
                l2_entry &= ~QCOW_OFLAG_COPIED;
                res->corruptions++;
            }

            /* Mark cluster as used */
            nb_csectors = ((l2_entry >> s->csize_shift) &
                           s->csize_mask) + 1;
            l2_entry &= s->cluster_offset_mask;
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                l2_entry & ~511, nb_csectors * 512);
            if (ret < 0) {
                goto fail;
            }

            if (flags & CHECK_FRAG_INFO) {
                res->bfi.allocated_clusters++;
                res->bfi.compressed_clusters++;

                /* Compressed clusters are fragmented by nature. Since they
                 * take up sub-sector space but we only have sector granularity
                 * I/O we need to re-read the same sectors even for adjacent
                 * compressed clusters.
                 */
                res->bfi.fragmented_clusters++;
            }
            break;

        case QCOW2_CLUSTER_ZERO_ALLOC:
        case QCOW2_CLUSTER_NORMAL:
        {
            uint64_t offset = l2_entry & L2E_OFFSET_MASK;

            if (flags & CHECK_FRAG_INFO) {
                res->bfi.allocated_clusters++;
                if (next_contiguous_offset &&
                    offset != next_contiguous_offset) {
                    res->bfi.fragmented_clusters++;
                }
                next_contiguous_offset = offset + s->cluster_size;
            }

            /* Mark cluster as used */
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                offset, s->cluster_size);
            if (ret < 0) {
                goto fail;
            }

            /* Correct offsets are cluster aligned */
            if (offset_into_cluster(s, offset)) {
                fprintf(stderr, "ERROR offset=%" PRIx64 ": Cluster is not "
                        "properly aligned; L2 entry corrupted.\n", offset);
                res->corruptions++;
            }
            break;
        }

        case QCOW2_CLUSTER_ZERO_PLAIN:
        case QCOW2_CLUSTER_UNALLOCATED:
            break;

        default:
            abort();
        }
    }

    g_free(l2_table);
    return 0;

fail:
    g_free(l2_table);
    return ret;
}

/*
 * Increases the refcount for the L1 table, its L2 tables and all referenced
 * clusters in the given refcount table. While doing so, performs some checks
 * on L1 and L2 entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l1(BlockDriverState *bs,
                              BdrvCheckResult *res,
                              void **refcount_table,
                              int64_t *refcount_table_size,
                              int64_t l1_table_offset, int l1_size,
                              int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table = NULL, l2_offset, l1_size2;
    int i, ret;

    l1_size2 = l1_size * sizeof(uint64_t);

    /* Mark L1 table as used */
    ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                        l1_table_offset, l1_size2);
    if (ret < 0) {
        goto fail;
    }

    /* Read L1 table entries from disk */
    if (l1_size2 > 0) {
        l1_table = g_try_malloc(l1_size2);
        if (l1_table == NULL) {
            ret = -ENOMEM;
            res->check_errors++;
            goto fail;
        }
        ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
        if (ret < 0) {
            fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
            res->check_errors++;
            goto fail;
        }
        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
        }
    }

    /* Do the actual checks */
    for (i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            /* Mark L2 table as used */
            l2_offset &= L1E_OFFSET_MASK;
            ret = inc_refcounts(bs, res, refcount_table, refcount_table_size,
                                l2_offset, s->cluster_size);
            if (ret < 0) {
                goto fail;
            }

            /* L2 tables are cluster aligned */
            if (offset_into_cluster(s, l2_offset)) {
                fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not "
                        "cluster aligned; L1 entry corrupted\n", l2_offset);
                res->corruptions++;
            }

            /* Process and check L2 entries */
            ret = check_refcounts_l2(bs, res, refcount_table,
                                     refcount_table_size, l2_offset, flags);
            if (ret < 0) {
                goto fail;
            }
        }
    }
    g_free(l1_table);
    return 0;

fail:
    g_free(l1_table);
    return ret;
}

/*
 * Checks the OFLAG_COPIED flag for all L1 and L2 entries.
 *
 * This function does not print an error message nor does it increment
 * check_errors if qcow2_get_refcount fails (this is because such an error will
 * have been already detected and sufficiently signaled by the calling function
 * (qcow2_check_refcounts) by the time this function is called).
 */
static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
                              BdrvCheckMode fix)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table = qemu_blockalign(bs, s->cluster_size);
    int ret;
    uint64_t refcount;
    int i, j;

    for (i = 0; i < s->l1_size; i++) {
        uint64_t l1_entry = s->l1_table[i];
        uint64_t l2_offset = l1_entry & L1E_OFFSET_MASK;
        bool l2_dirty = false;

        if (!l2_offset) {
            continue;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &refcount);
        if (ret < 0) {
            /* don't print message nor increment check_errors */
            continue;
        }
        if ((refcount == 1) != ((l1_entry & QCOW_OFLAG_COPIED) != 0)) {
            fprintf(stderr, "%s OFLAG_COPIED L2 cluster: l1_index=%d "
                    "l1_entry=%" PRIx64 " refcount=%" PRIu64 "\n",
                    fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR",
                    i, l1_entry, refcount);
            if (fix & BDRV_FIX_ERRORS) {
                s->l1_table[i] = refcount == 1
                               ? l1_entry |  QCOW_OFLAG_COPIED
                               : l1_entry & ~QCOW_OFLAG_COPIED;
                ret = qcow2_write_l1_entry(bs, i);
                if (ret < 0) {
                    res->check_errors++;
                    goto fail;
                }
                res->corruptions_fixed++;
            } else {
                res->corruptions++;
            }
        }

        ret = bdrv_pread(bs->file, l2_offset, l2_table,
                         s->l2_size * sizeof(uint64_t));
        if (ret < 0) {
            fprintf(stderr, "ERROR: Could not read L2 table: %s\n",
                    strerror(-ret));
            res->check_errors++;
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            uint64_t data_offset = l2_entry & L2E_OFFSET_MASK;
            QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);

            if (cluster_type == QCOW2_CLUSTER_NORMAL ||
                cluster_type == QCOW2_CLUSTER_ZERO_ALLOC) {
                ret = qcow2_get_refcount(bs,
                                         data_offset >> s->cluster_bits,
                                         &refcount);
                if (ret < 0) {
                    /* don't print message nor increment check_errors */
                    continue;
                }
                if ((refcount == 1) != ((l2_entry & QCOW_OFLAG_COPIED) != 0)) {
                    fprintf(stderr, "%s OFLAG_COPIED data cluster: "
                            "l2_entry=%" PRIx64 " refcount=%" PRIu64 "\n",
                            fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR",
                            l2_entry, refcount);
                    if (fix & BDRV_FIX_ERRORS) {
                        l2_table[j] = cpu_to_be64(refcount == 1
                                    ? l2_entry |  QCOW_OFLAG_COPIED
                                    : l2_entry & ~QCOW_OFLAG_COPIED);
                        l2_dirty = true;
                        res->corruptions_fixed++;
                    } else {
                        res->corruptions++;
                    }
                }
            }
        }

        if (l2_dirty) {
            ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L2,
                                                l2_offset, s->cluster_size);
            if (ret < 0) {
                fprintf(stderr, "ERROR: Could not write L2 table; metadata "
                        "overlap check failed: %s\n", strerror(-ret));
                res->check_errors++;
                goto fail;
            }

            ret = bdrv_pwrite(bs->file, l2_offset, l2_table,
                              s->cluster_size);
            if (ret < 0) {
                fprintf(stderr, "ERROR: Could not write L2 table: %s\n",
                        strerror(-ret));
                res->check_errors++;
                goto fail;
            }
        }
    }

    ret = 0;

fail:
    qemu_vfree(l2_table);
    return ret;
}
"Repairing" : "ERROR", i); 1722 1723 if (fix & BDRV_FIX_ERRORS) { 1724 int64_t new_nb_clusters; 1725 Error *local_err = NULL; 1726 1727 if (offset > INT64_MAX - s->cluster_size) { 1728 ret = -EINVAL; 1729 goto resize_fail; 1730 } 1731 1732 ret = bdrv_truncate(bs->file, offset + s->cluster_size, 1733 &local_err); 1734 if (ret < 0) { 1735 error_report_err(local_err); 1736 goto resize_fail; 1737 } 1738 size = bdrv_getlength(bs->file->bs); 1739 if (size < 0) { 1740 ret = size; 1741 goto resize_fail; 1742 } 1743 1744 new_nb_clusters = size_to_clusters(s, size); 1745 assert(new_nb_clusters >= *nb_clusters); 1746 1747 ret = realloc_refcount_array(s, refcount_table, 1748 nb_clusters, new_nb_clusters); 1749 if (ret < 0) { 1750 res->check_errors++; 1751 return ret; 1752 } 1753 1754 if (cluster >= *nb_clusters) { 1755 ret = -EINVAL; 1756 goto resize_fail; 1757 } 1758 1759 res->corruptions_fixed++; 1760 ret = inc_refcounts(bs, res, refcount_table, nb_clusters, 1761 offset, s->cluster_size); 1762 if (ret < 0) { 1763 return ret; 1764 } 1765 /* No need to check whether the refcount is now greater than 1: 1766 * This area was just allocated and zeroed, so it can only be 1767 * exactly 1 after inc_refcounts() */ 1768 continue; 1769 1770 resize_fail: 1771 res->corruptions++; 1772 *rebuild = true; 1773 fprintf(stderr, "ERROR could not resize image: %s\n", 1774 strerror(-ret)); 1775 } else { 1776 res->corruptions++; 1777 } 1778 continue; 1779 } 1780 1781 if (offset != 0) { 1782 ret = inc_refcounts(bs, res, refcount_table, nb_clusters, 1783 offset, s->cluster_size); 1784 if (ret < 0) { 1785 return ret; 1786 } 1787 if (s->get_refcount(*refcount_table, cluster) != 1) { 1788 fprintf(stderr, "ERROR refcount block %" PRId64 1789 " refcount=%" PRIu64 "\n", i, 1790 s->get_refcount(*refcount_table, cluster)); 1791 res->corruptions++; 1792 *rebuild = true; 1793 } 1794 } 1795 } 1796 1797 return 0; 1798 } 1799 1800 /* 1801 * Calculates an in-memory refcount table. 

/*
 * Calculates an in-memory refcount table.
 */
static int calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                               BdrvCheckMode fix, bool *rebuild,
                               void **refcount_table, int64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t i;
    QCowSnapshot *sn;
    int ret;

    if (!*refcount_table) {
        int64_t old_size = 0;
        ret = realloc_refcount_array(s, refcount_table,
                                     &old_size, *nb_clusters);
        if (ret < 0) {
            res->check_errors++;
            return ret;
        }
    }

    /* header */
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        0, s->cluster_size);
    if (ret < 0) {
        return ret;
    }

    /* current L1 table */
    ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
                             s->l1_table_offset, s->l1_size, CHECK_FRAG_INFO);
    if (ret < 0) {
        return ret;
    }

    /* snapshots */
    for (i = 0; i < s->nb_snapshots; i++) {
        sn = s->snapshots + i;
        ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
                                 sn->l1_table_offset, sn->l1_size, 0);
        if (ret < 0) {
            return ret;
        }
    }
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        s->snapshots_offset, s->snapshots_size);
    if (ret < 0) {
        return ret;
    }

    /* refcount data */
    ret = inc_refcounts(bs, res, refcount_table, nb_clusters,
                        s->refcount_table_offset,
                        s->refcount_table_size * sizeof(uint64_t));
    if (ret < 0) {
        return ret;
    }

    return check_refblocks(bs, res, fix, rebuild, refcount_table, nb_clusters);
}

/*
 * Compares the actual reference count for each cluster in the image against
 * the refcount as reported by the refcount structures on-disk.
 */
static void compare_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                              BdrvCheckMode fix, bool *rebuild,
                              int64_t *highest_cluster,
                              void *refcount_table, int64_t nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t i;
    uint64_t refcount1, refcount2;
    int ret;

    for (i = 0, *highest_cluster = 0; i < nb_clusters; i++) {
        ret = qcow2_get_refcount(bs, i, &refcount1);
        if (ret < 0) {
            fprintf(stderr, "Can't get refcount for cluster %" PRId64 ": %s\n",
                    i, strerror(-ret));
            res->check_errors++;
            continue;
        }

        refcount2 = s->get_refcount(refcount_table, i);

        if (refcount1 > 0 || refcount2 > 0) {
            *highest_cluster = i;
        }

        if (refcount1 != refcount2) {
            /* Check if we're allowed to fix the mismatch */
            int *num_fixed = NULL;
            if (refcount1 == 0) {
                *rebuild = true;
            } else if (refcount1 > refcount2 && (fix & BDRV_FIX_LEAKS)) {
                num_fixed = &res->leaks_fixed;
            } else if (refcount1 < refcount2 && (fix & BDRV_FIX_ERRORS)) {
                num_fixed = &res->corruptions_fixed;
            }

            fprintf(stderr, "%s cluster %" PRId64 " refcount=%" PRIu64
                    " reference=%" PRIu64 "\n",
                    num_fixed != NULL     ? "Repairing" :
                    refcount1 < refcount2 ? "ERROR" :
                                            "Leaked",
                    i, refcount1, refcount2);

            if (num_fixed) {
                ret = update_refcount(bs, i << s->cluster_bits, 1,
                                      refcount_diff(refcount1, refcount2),
                                      refcount1 > refcount2,
                                      QCOW2_DISCARD_ALWAYS);
                if (ret >= 0) {
                    (*num_fixed)++;
                    continue;
                }
            }

            /* And if we couldn't, print an error */
            if (refcount1 < refcount2) {
                res->corruptions++;
            } else {
                res->leaks++;
            }
        }
    }
}
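
/*
 * Classification example for the comparison above (illustrative): if the
 * image reports refcount1 = 2 but the recomputed reference count is 1, the
 * cluster is merely leaked (fixable by decrementing with BDRV_FIX_LEAKS);
 * if refcount1 = 1 but two L2 entries reference the cluster (refcount2 = 2),
 * that is a corruption (fixable with BDRV_FIX_ERRORS); and a referenced
 * cluster with refcount1 = 0 means the on-disk refcount structures cannot
 * be trusted at all, so a full rebuild is requested.
 */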
"ERROR" : 1906 "Leaked", 1907 i, refcount1, refcount2); 1908 1909 if (num_fixed) { 1910 ret = update_refcount(bs, i << s->cluster_bits, 1, 1911 refcount_diff(refcount1, refcount2), 1912 refcount1 > refcount2, 1913 QCOW2_DISCARD_ALWAYS); 1914 if (ret >= 0) { 1915 (*num_fixed)++; 1916 continue; 1917 } 1918 } 1919 1920 /* And if we couldn't, print an error */ 1921 if (refcount1 < refcount2) { 1922 res->corruptions++; 1923 } else { 1924 res->leaks++; 1925 } 1926 } 1927 } 1928 } 1929 1930 /* 1931 * Allocates clusters using an in-memory refcount table (IMRT) in contrast to 1932 * the on-disk refcount structures. 1933 * 1934 * On input, *first_free_cluster tells where to start looking, and need not 1935 * actually be a free cluster; the returned offset will not be before that 1936 * cluster. On output, *first_free_cluster points to the first gap found, even 1937 * if that gap was too small to be used as the returned offset. 1938 * 1939 * Note that *first_free_cluster is a cluster index whereas the return value is 1940 * an offset. 1941 */ 1942 static int64_t alloc_clusters_imrt(BlockDriverState *bs, 1943 int cluster_count, 1944 void **refcount_table, 1945 int64_t *imrt_nb_clusters, 1946 int64_t *first_free_cluster) 1947 { 1948 BDRVQcow2State *s = bs->opaque; 1949 int64_t cluster = *first_free_cluster, i; 1950 bool first_gap = true; 1951 int contiguous_free_clusters; 1952 int ret; 1953 1954 /* Starting at *first_free_cluster, find a range of at least cluster_count 1955 * continuously free clusters */ 1956 for (contiguous_free_clusters = 0; 1957 cluster < *imrt_nb_clusters && 1958 contiguous_free_clusters < cluster_count; 1959 cluster++) 1960 { 1961 if (!s->get_refcount(*refcount_table, cluster)) { 1962 contiguous_free_clusters++; 1963 if (first_gap) { 1964 /* If this is the first free cluster found, update 1965 * *first_free_cluster accordingly */ 1966 *first_free_cluster = cluster; 1967 first_gap = false; 1968 } 1969 } else if (contiguous_free_clusters) { 1970 contiguous_free_clusters = 0; 1971 } 1972 } 1973 1974 /* If contiguous_free_clusters is greater than zero, it contains the number 1975 * of continuously free clusters until the current cluster; the first free 1976 * cluster in the current "gap" is therefore 1977 * cluster - contiguous_free_clusters */ 1978 1979 /* If no such range could be found, grow the in-memory refcount table 1980 * accordingly to append free clusters at the end of the image */ 1981 if (contiguous_free_clusters < cluster_count) { 1982 /* contiguous_free_clusters clusters are already empty at the image end; 1983 * we need cluster_count clusters; therefore, we have to allocate 1984 * cluster_count - contiguous_free_clusters new clusters at the end of 1985 * the image (which is the current value of cluster; note that cluster 1986 * may exceed old_imrt_nb_clusters if *first_free_cluster pointed beyond 1987 * the image end) */ 1988 ret = realloc_refcount_array(s, refcount_table, imrt_nb_clusters, 1989 cluster + cluster_count 1990 - contiguous_free_clusters); 1991 if (ret < 0) { 1992 return ret; 1993 } 1994 } 1995 1996 /* Go back to the first free cluster */ 1997 cluster -= contiguous_free_clusters; 1998 for (i = 0; i < cluster_count; i++) { 1999 s->set_refcount(*refcount_table, cluster + i, 1); 2000 } 2001 2002 return cluster << s->cluster_bits; 2003 } 2004 2005 /* 2006 * Creates a new refcount structure based solely on the in-memory information 2007 * given through *refcount_table. All necessary allocations will be reflected 2008 * in that array. 
 *
 * On success, the old refcount structure is leaked (it will be covered by the
 * new refcount structure).
 */
static int rebuild_refcount_structure(BlockDriverState *bs,
                                      BdrvCheckResult *res,
                                      void **refcount_table,
                                      int64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t first_free_cluster = 0, reftable_offset = -1, cluster = 0;
    int64_t refblock_offset, refblock_start, refblock_index;
    uint32_t reftable_size = 0;
    uint64_t *on_disk_reftable = NULL;
    void *on_disk_refblock;
    int ret = 0;
    struct {
        uint64_t reftable_offset;
        uint32_t reftable_clusters;
    } QEMU_PACKED reftable_offset_and_clusters;

    qcow2_cache_empty(bs, s->refcount_block_cache);

write_refblocks:
    for (; cluster < *nb_clusters; cluster++) {
        if (!s->get_refcount(*refcount_table, cluster)) {
            continue;
        }

        refblock_index = cluster >> s->refcount_block_bits;
        refblock_start = refblock_index << s->refcount_block_bits;

        /* Don't allocate a cluster in a refblock already written to disk */
        if (first_free_cluster < refblock_start) {
            first_free_cluster = refblock_start;
        }
        refblock_offset = alloc_clusters_imrt(bs, 1, refcount_table,
                                              nb_clusters,
                                              &first_free_cluster);
        if (refblock_offset < 0) {
            fprintf(stderr, "ERROR allocating refblock: %s\n",
                    strerror(-refblock_offset));
            res->check_errors++;
            ret = refblock_offset;
            goto fail;
        }

        if (reftable_size <= refblock_index) {
            uint32_t old_reftable_size = reftable_size;
            uint64_t *new_on_disk_reftable;

            reftable_size = ROUND_UP((refblock_index + 1) * sizeof(uint64_t),
                                     s->cluster_size) / sizeof(uint64_t);
            new_on_disk_reftable = g_try_realloc(on_disk_reftable,
                                                 reftable_size *
                                                 sizeof(uint64_t));
            if (!new_on_disk_reftable) {
                res->check_errors++;
                ret = -ENOMEM;
                goto fail;
            }
            on_disk_reftable = new_on_disk_reftable;

            memset(on_disk_reftable + old_reftable_size, 0,
                   (reftable_size - old_reftable_size) * sizeof(uint64_t));

            /* The offset we have for the reftable is now no longer valid;
             * this will leak that range, but we can easily fix that by running
             * a leak-fixing check after this rebuild operation */
            reftable_offset = -1;
        }
        on_disk_reftable[refblock_index] = refblock_offset;

        /* If this is apparently the last refblock (for now), try to squeeze
         * the reftable in */
        if (refblock_index == (*nb_clusters - 1) >> s->refcount_block_bits &&
            reftable_offset < 0)
        {
            uint64_t reftable_clusters = size_to_clusters(s, reftable_size *
                                                          sizeof(uint64_t));
            reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
                                                  refcount_table, nb_clusters,
                                                  &first_free_cluster);
            if (reftable_offset < 0) {
                fprintf(stderr, "ERROR allocating reftable: %s\n",
                        strerror(-reftable_offset));
                res->check_errors++;
                ret = reftable_offset;
                goto fail;
            }
        }

        ret = qcow2_pre_write_overlap_check(bs, 0, refblock_offset,
                                            s->cluster_size);
        if (ret < 0) {
            fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
            goto fail;
        }

        /* The size of *refcount_table is always cluster-aligned, therefore the
         * write operation will not overflow */
        on_disk_refblock = (void *)((char *) *refcount_table +
                                    refblock_index * s->cluster_size);

        ret = bdrv_write(bs->file,
                         refblock_offset / BDRV_SECTOR_SIZE,
                         on_disk_refblock, s->cluster_sectors);
        if (ret < 0) {
            fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
            goto fail;
        }

        /* Go to the end of this refblock */
        cluster = refblock_start + s->refcount_block_size - 1;
    }

    if (reftable_offset < 0) {
        uint64_t post_refblock_start, reftable_clusters;

        post_refblock_start = ROUND_UP(*nb_clusters, s->refcount_block_size);
        reftable_clusters = size_to_clusters(s,
                                             reftable_size * sizeof(uint64_t));
        /* Not pretty but simple */
        if (first_free_cluster < post_refblock_start) {
            first_free_cluster = post_refblock_start;
        }
        reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
                                              refcount_table, nb_clusters,
                                              &first_free_cluster);
        if (reftable_offset < 0) {
            fprintf(stderr, "ERROR allocating reftable: %s\n",
                    strerror(-reftable_offset));
            res->check_errors++;
            ret = reftable_offset;
            goto fail;
        }

        goto write_refblocks;
    }

    assert(on_disk_reftable);

    for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) {
        cpu_to_be64s(&on_disk_reftable[refblock_index]);
    }

    ret = qcow2_pre_write_overlap_check(bs, 0, reftable_offset,
                                        reftable_size * sizeof(uint64_t));
    if (ret < 0) {
        fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
        goto fail;
    }

    assert(reftable_size < INT_MAX / sizeof(uint64_t));
    ret = bdrv_pwrite(bs->file, reftable_offset, on_disk_reftable,
                      reftable_size * sizeof(uint64_t));
    if (ret < 0) {
        fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
        goto fail;
    }

    /* Enter new reftable into the image header */
    reftable_offset_and_clusters.reftable_offset = cpu_to_be64(reftable_offset);
    reftable_offset_and_clusters.reftable_clusters =
        cpu_to_be32(size_to_clusters(s, reftable_size * sizeof(uint64_t)));
    ret = bdrv_pwrite_sync(bs->file,
                           offsetof(QCowHeader, refcount_table_offset),
                           &reftable_offset_and_clusters,
                           sizeof(reftable_offset_and_clusters));
    if (ret < 0) {
        fprintf(stderr, "ERROR setting reftable: %s\n", strerror(-ret));
        goto fail;
    }

    for (refblock_index = 0; refblock_index < reftable_size; refblock_index++) {
        be64_to_cpus(&on_disk_reftable[refblock_index]);
    }
    s->refcount_table = on_disk_reftable;
    s->refcount_table_offset = reftable_offset;
    s->refcount_table_size = reftable_size;
    update_max_refcount_table_index(s);

    return 0;

fail:
    g_free(on_disk_reftable);
    return ret;
}

/*
 * Checks an image for refcount consistency.
 *
 * Returns 0 if the check completed (any corruptions or leaks found are
 * reported through *res), and -errno when an internal error occurred.
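 *
 * If BDRV_FIX_ERRORS and/or BDRV_FIX_LEAKS are set in @fix, an attempt is
 * made to repair the inconsistencies that are found.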
 */
int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                          BdrvCheckMode fix)
{
    BDRVQcow2State *s = bs->opaque;
    BdrvCheckResult pre_compare_res;
    int64_t size, highest_cluster, nb_clusters;
    void *refcount_table = NULL;
    bool rebuild = false;
    int ret;

    size = bdrv_getlength(bs->file->bs);
    if (size < 0) {
        res->check_errors++;
        return size;
    }

    nb_clusters = size_to_clusters(s, size);
    if (nb_clusters > INT_MAX) {
        res->check_errors++;
        return -EFBIG;
    }

    res->bfi.total_clusters =
        size_to_clusters(s, bs->total_sectors * BDRV_SECTOR_SIZE);

    ret = calculate_refcounts(bs, res, fix, &rebuild, &refcount_table,
                              &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* If the refcount structure does not need to be rebuilt (but something
     * should be fixed), compare_refcounts() is called again below with the
     * real @fix flags; in that case, the result of this first dry run is
     * discarded */
    pre_compare_res = *res;
    compare_refcounts(bs, res, 0, &rebuild, &highest_cluster, refcount_table,
                      nb_clusters);

    if (rebuild && (fix & BDRV_FIX_ERRORS)) {
        BdrvCheckResult old_res = *res;
        int fresh_leaks = 0;

        fprintf(stderr, "Rebuilding refcount structure\n");
        ret = rebuild_refcount_structure(bs, res, &refcount_table,
                                         &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        res->corruptions = 0;
        res->leaks = 0;

        /* Because the old reftable has been exchanged for a new one the
         * references have to be recalculated */
        rebuild = false;
        memset(refcount_table, 0, refcount_array_byte_size(s, nb_clusters));
        ret = calculate_refcounts(bs, res, 0, &rebuild, &refcount_table,
                                  &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        if (fix & BDRV_FIX_LEAKS) {
            /* The old refcount structures are now leaked, fix it; the result
             * can be ignored, aside from leaks which were introduced by
             * rebuild_refcount_structure() that could not be fixed */
            BdrvCheckResult saved_res = *res;
            *res = (BdrvCheckResult){ 0 };

            compare_refcounts(bs, res, BDRV_FIX_LEAKS, &rebuild,
                              &highest_cluster, refcount_table, nb_clusters);
            if (rebuild) {
                fprintf(stderr, "ERROR rebuilt refcount structure is still "
                        "broken\n");
            }

            /* Any leaks accounted for here were introduced by
             * rebuild_refcount_structure() because that function has created a
             * new refcount structure from scratch */
            fresh_leaks = res->leaks;
            *res = saved_res;
        }

        if (res->corruptions < old_res.corruptions) {
            res->corruptions_fixed += old_res.corruptions - res->corruptions;
        }
        if (res->leaks < old_res.leaks) {
            res->leaks_fixed += old_res.leaks - res->leaks;
        }
        res->leaks += fresh_leaks;
    } else if (fix) {
        if (rebuild) {
            fprintf(stderr, "ERROR need to rebuild refcount structures\n");
            res->check_errors++;
            ret = -EIO;
            goto fail;
        }

        if (res->leaks || res->corruptions) {
            *res = pre_compare_res;
            compare_refcounts(bs, res, fix, &rebuild, &highest_cluster,
                              refcount_table, nb_clusters);
        }
    }

    /* check OFLAG_COPIED */
    ret = check_oflag_copied(bs, res, fix);
    if (ret < 0) {
        goto fail;
    }

    res->image_end_offset = (highest_cluster + 1) * s->cluster_size;
    ret = 0;

fail:
    g_free(refcount_table);

    return ret;
}

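/*
 * Minimal usage sketch for the check entry point above (illustrative only;
 * the actual caller is the driver's bdrv_check implementation, which lives
 * outside this file):
 *
 *     BdrvCheckResult result = {0};
 *     int ret = qcow2_check_refcounts(bs, &result,
 *                                     BDRV_FIX_ERRORS | BDRV_FIX_LEAKS);
 *
 * A negative ret indicates an internal error (result.check_errors has been
 * incremented); otherwise, result holds the number of corruptions and leaks
 * that were found and fixed.
 */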
#define overlaps_with(ofs, sz) \
    ranges_overlap(offset, size, ofs, sz)

/*
 * Checks if the given offset into the image file is actually free to use by
 * looking for overlaps with important metadata sections (L1/L2 tables etc.),
 * i.e. a sanity check without relying on the refcount tables.
 *
 * The ign parameter specifies what checks not to perform (being a bitmask of
 * QCow2MetadataOverlap values), i.e., what sections to ignore.
 *
 * Returns:
 * - 0 if writing to this offset will not affect the mentioned metadata
 * - a positive QCow2MetadataOverlap value indicating one overlapping section
 * - a negative value (-errno) indicating an error while performing a check,
 *   e.g. when bdrv_pread failed on QCOW2_OL_INACTIVE_L2
 */
int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
                                 int64_t size)
{
    BDRVQcow2State *s = bs->opaque;
    int chk = s->overlap_check & ~ign;
    int i, j;

    if (!size) {
        return 0;
    }

    if (chk & QCOW2_OL_MAIN_HEADER) {
        if (offset < s->cluster_size) {
            return QCOW2_OL_MAIN_HEADER;
        }
    }

    /* align range to test to cluster boundaries */
    size = align_offset(offset_into_cluster(s, offset) + size, s->cluster_size);
    offset = start_of_cluster(s, offset);

    if ((chk & QCOW2_OL_ACTIVE_L1) && s->l1_size) {
        if (overlaps_with(s->l1_table_offset, s->l1_size * sizeof(uint64_t))) {
            return QCOW2_OL_ACTIVE_L1;
        }
    }

    if ((chk & QCOW2_OL_REFCOUNT_TABLE) && s->refcount_table_size) {
        if (overlaps_with(s->refcount_table_offset,
                          s->refcount_table_size * sizeof(uint64_t))) {
            return QCOW2_OL_REFCOUNT_TABLE;
        }
    }

    if ((chk & QCOW2_OL_SNAPSHOT_TABLE) && s->snapshots_size) {
        if (overlaps_with(s->snapshots_offset, s->snapshots_size)) {
            return QCOW2_OL_SNAPSHOT_TABLE;
        }
    }

    if ((chk & QCOW2_OL_INACTIVE_L1) && s->snapshots) {
        for (i = 0; i < s->nb_snapshots; i++) {
            if (s->snapshots[i].l1_size &&
                overlaps_with(s->snapshots[i].l1_table_offset,
                              s->snapshots[i].l1_size * sizeof(uint64_t))) {
                return QCOW2_OL_INACTIVE_L1;
            }
        }
    }

    if ((chk & QCOW2_OL_ACTIVE_L2) && s->l1_table) {
        for (i = 0; i < s->l1_size; i++) {
            if ((s->l1_table[i] & L1E_OFFSET_MASK) &&
                overlaps_with(s->l1_table[i] & L1E_OFFSET_MASK,
                              s->cluster_size)) {
                return QCOW2_OL_ACTIVE_L2;
            }
        }
    }

    if ((chk & QCOW2_OL_REFCOUNT_BLOCK) && s->refcount_table) {
        unsigned last_entry = s->max_refcount_table_index;
        assert(last_entry < s->refcount_table_size);
        assert(last_entry + 1 == s->refcount_table_size ||
               (s->refcount_table[last_entry + 1] & REFT_OFFSET_MASK) == 0);
        for (i = 0; i <= last_entry; i++) {
            if ((s->refcount_table[i] & REFT_OFFSET_MASK) &&
                overlaps_with(s->refcount_table[i] & REFT_OFFSET_MASK,
                              s->cluster_size)) {
                return QCOW2_OL_REFCOUNT_BLOCK;
            }
        }
    }

    if ((chk & QCOW2_OL_INACTIVE_L2) && s->snapshots) {
        for (i = 0; i < s->nb_snapshots; i++) {
            uint64_t l1_ofs = s->snapshots[i].l1_table_offset;
            uint32_t l1_sz  = s->snapshots[i].l1_size;
            uint64_t l1_sz2 = l1_sz * sizeof(uint64_t);
            uint64_t *l1 = g_try_malloc(l1_sz2);
            int ret;

            if (l1_sz2 && l1 == NULL) {
                return -ENOMEM;
            }

            ret = bdrv_pread(bs->file, l1_ofs, l1, l1_sz2);
            if (ret < 0) {
                g_free(l1);
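                /* Reading this snapshot's L1 table failed; propagate the
                 * error */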
                return ret;
            }

            for (j = 0; j < l1_sz; j++) {
                uint64_t l2_ofs = be64_to_cpu(l1[j]) & L1E_OFFSET_MASK;
                if (l2_ofs && overlaps_with(l2_ofs, s->cluster_size)) {
                    g_free(l1);
                    return QCOW2_OL_INACTIVE_L2;
                }
            }

            g_free(l1);
        }
    }

    return 0;
}

static const char *metadata_ol_names[] = {
    [QCOW2_OL_MAIN_HEADER_BITNR]    = "qcow2_header",
    [QCOW2_OL_ACTIVE_L1_BITNR]      = "active L1 table",
    [QCOW2_OL_ACTIVE_L2_BITNR]      = "active L2 table",
    [QCOW2_OL_REFCOUNT_TABLE_BITNR] = "refcount table",
    [QCOW2_OL_REFCOUNT_BLOCK_BITNR] = "refcount block",
    [QCOW2_OL_SNAPSHOT_TABLE_BITNR] = "snapshot table",
    [QCOW2_OL_INACTIVE_L1_BITNR]    = "inactive L1 table",
    [QCOW2_OL_INACTIVE_L2_BITNR]    = "inactive L2 table",
};

/*
 * First performs a check for metadata overlaps (through
 * qcow2_check_metadata_overlap); if that fails with a negative value (error
 * while performing a check), that value is returned. If an impending overlap
 * is detected, the BDS will be made unusable, the qcow2 file marked corrupt
 * and -EIO returned.
 *
 * Returns 0 if there were neither overlaps nor errors while checking for
 * overlaps; or a negative value (-errno) on error.
 */
int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
                                  int64_t size)
{
    int ret = qcow2_check_metadata_overlap(bs, ign, offset, size);

    if (ret < 0) {
        return ret;
    } else if (ret > 0) {
        int metadata_ol_bitnr = ctz32(ret);
        assert(metadata_ol_bitnr < QCOW2_OL_MAX_BITNR);

        qcow2_signal_corruption(bs, true, offset, size, "Preventing invalid "
                                "write on metadata (overlaps with %s)",
                                metadata_ol_names[metadata_ol_bitnr]);
        return -EIO;
    }

    return 0;
}

/* A pointer to a function of this type is given to walk_over_reftable(). That
 * function will create refblocks and pass them to a RefblockFinishOp once they
 * are completed (@refblock). @refblock_empty is set if the refblock is
 * completely empty.
 *
 * Along with the refblock, a corresponding reftable entry is passed, in the
 * reftable @reftable (which may be reallocated) at @reftable_index.
 *
 * @allocated should be set to true if a new cluster has been allocated.
 */
typedef int (RefblockFinishOp)(BlockDriverState *bs, uint64_t **reftable,
                               uint64_t reftable_index, uint64_t *reftable_size,
                               void *refblock, bool refblock_empty,
                               bool *allocated, Error **errp);

/**
 * This "operation" for walk_over_reftable() allocates the refblock on disk (if
 * it is not empty) and inserts its offset into the new reftable. The size of
 * this new reftable is increased as required.
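 * Empty refblocks are skipped entirely: no cluster is allocated for them and
 * their reftable entry is left at zero.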
 */
static int alloc_refblock(BlockDriverState *bs, uint64_t **reftable,
                          uint64_t reftable_index, uint64_t *reftable_size,
                          void *refblock, bool refblock_empty, bool *allocated,
                          Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t offset;

    if (!refblock_empty && reftable_index >= *reftable_size) {
        uint64_t *new_reftable;
        uint64_t new_reftable_size;

        new_reftable_size = ROUND_UP(reftable_index + 1,
                                     s->cluster_size / sizeof(uint64_t));
        if (new_reftable_size > QCOW_MAX_REFTABLE_SIZE / sizeof(uint64_t)) {
            error_setg(errp,
                       "This operation would make the refcount table grow "
                       "beyond the maximum size supported by QEMU, aborting");
            return -ENOTSUP;
        }

        new_reftable = g_try_realloc(*reftable, new_reftable_size *
                                                sizeof(uint64_t));
        if (!new_reftable) {
            error_setg(errp, "Failed to increase reftable buffer size");
            return -ENOMEM;
        }

        memset(new_reftable + *reftable_size, 0,
               (new_reftable_size - *reftable_size) * sizeof(uint64_t));

        *reftable      = new_reftable;
        *reftable_size = new_reftable_size;
    }

    if (!refblock_empty && !(*reftable)[reftable_index]) {
        offset = qcow2_alloc_clusters(bs, s->cluster_size);
        if (offset < 0) {
            error_setg_errno(errp, -offset, "Failed to allocate refblock");
            return offset;
        }
        (*reftable)[reftable_index] = offset;
        *allocated = true;
    }

    return 0;
}

/**
 * This "operation" for walk_over_reftable() writes the refblock to disk at the
 * offset specified by the new reftable's entry. It does not modify the new
 * reftable or change any refcounts.
 */
static int flush_refblock(BlockDriverState *bs, uint64_t **reftable,
                          uint64_t reftable_index, uint64_t *reftable_size,
                          void *refblock, bool refblock_empty, bool *allocated,
                          Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t offset;
    int ret;

    if (reftable_index < *reftable_size && (*reftable)[reftable_index]) {
        offset = (*reftable)[reftable_index];

        ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Overlap check failed");
            return ret;
        }

        ret = bdrv_pwrite(bs->file, offset, refblock, s->cluster_size);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Failed to write refblock");
            return ret;
        }
    } else {
        assert(refblock_empty);
    }

    return 0;
}

/**
 * This function walks over the existing reftable and every referenced refblock;
 * if @new_set_refcount is non-NULL, it is called for every refcount entry to
 * create an equal new entry in the passed @new_refblock. Once that
 * @new_refblock is completely filled, @operation will be called.
 *
 * @status_cb and @cb_opaque are used for the amend operation's status callback.
 * @index is the index of this walk_over_reftable() call within the amend
 * operation and @total is the total number of such calls; both are used for
 * calculating the parameters for the status callback.
 *
 * @allocated is set to true if a new cluster has been allocated.
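 * (It is never reset to false here, so the caller has to clear it before
 * each walk.)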
 */
static int walk_over_reftable(BlockDriverState *bs, uint64_t **new_reftable,
                              uint64_t *new_reftable_index,
                              uint64_t *new_reftable_size,
                              void *new_refblock, int new_refblock_size,
                              int new_refcount_bits,
                              RefblockFinishOp *operation, bool *allocated,
                              Qcow2SetRefcountFunc *new_set_refcount,
                              BlockDriverAmendStatusCB *status_cb,
                              void *cb_opaque, int index, int total,
                              Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t reftable_index;
    bool new_refblock_empty = true;
    int refblock_index;
    int new_refblock_index = 0;
    int ret;

    for (reftable_index = 0; reftable_index < s->refcount_table_size;
         reftable_index++)
    {
        uint64_t refblock_offset = s->refcount_table[reftable_index]
                                 & REFT_OFFSET_MASK;

        status_cb(bs, (uint64_t)index * s->refcount_table_size + reftable_index,
                  (uint64_t)total * s->refcount_table_size, cb_opaque);

        if (refblock_offset) {
            void *refblock;

            if (offset_into_cluster(s, refblock_offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#"
                                        PRIx64 " unaligned (reftable index: %#"
                                        PRIx64 ")", refblock_offset,
                                        reftable_index);
                error_setg(errp,
                           "Image is corrupt (unaligned refblock offset)");
                return -EIO;
            }

            ret = qcow2_cache_get(bs, s->refcount_block_cache, refblock_offset,
                                  &refblock);
            if (ret < 0) {
                error_setg_errno(errp, -ret, "Failed to retrieve refblock");
                return ret;
            }

            for (refblock_index = 0; refblock_index < s->refcount_block_size;
                 refblock_index++)
            {
                uint64_t refcount;

                if (new_refblock_index >= new_refblock_size) {
                    /* new_refblock is now complete */
                    ret = operation(bs, new_reftable, *new_reftable_index,
                                    new_reftable_size, new_refblock,
                                    new_refblock_empty, allocated, errp);
                    if (ret < 0) {
                        qcow2_cache_put(bs, s->refcount_block_cache, &refblock);
                        return ret;
                    }

                    (*new_reftable_index)++;
                    new_refblock_index = 0;
                    new_refblock_empty = true;
                }

                refcount = s->get_refcount(refblock, refblock_index);
                if (new_refcount_bits < 64 && refcount >> new_refcount_bits) {
                    uint64_t offset;

                    qcow2_cache_put(bs, s->refcount_block_cache, &refblock);

                    offset = ((reftable_index << s->refcount_block_bits)
                              + refblock_index) << s->cluster_bits;

                    error_setg(errp, "Cannot decrease refcount entry width to "
                               "%i bits: Cluster at offset %#" PRIx64 " has a "
                               "refcount of %" PRIu64, new_refcount_bits,
                               offset, refcount);
                    return -EINVAL;
                }

                if (new_set_refcount) {
                    new_set_refcount(new_refblock, new_refblock_index++,
                                     refcount);
                } else {
                    new_refblock_index++;
                }
                new_refblock_empty = new_refblock_empty && refcount == 0;
            }

            qcow2_cache_put(bs, s->refcount_block_cache, &refblock);
        } else {
            /* No refblock means every refcount is 0 */
            for (refblock_index = 0; refblock_index < s->refcount_block_size;
                 refblock_index++)
            {
                if (new_refblock_index >= new_refblock_size) {
                    /* new_refblock is now complete */
                    ret = operation(bs, new_reftable, *new_reftable_index,
                                    new_reftable_size, new_refblock,
                                    new_refblock_empty, allocated, errp);
                    if (ret < 0) {
                        return ret;
                    }

                    (*new_reftable_index)++;
                    new_refblock_index = 0;
                    new_refblock_empty = true;
                }

                if (new_set_refcount) {
                    new_set_refcount(new_refblock, new_refblock_index++, 0);
                } else {
                    new_refblock_index++;
                }
            }
        }
    }

    if (new_refblock_index > 0) {
        /* Complete the potentially existing partially filled final refblock */
        if (new_set_refcount) {
            for (; new_refblock_index < new_refblock_size;
                 new_refblock_index++)
            {
                new_set_refcount(new_refblock, new_refblock_index, 0);
            }
        }

        ret = operation(bs, new_reftable, *new_reftable_index,
                        new_reftable_size, new_refblock, new_refblock_empty,
                        allocated, errp);
        if (ret < 0) {
            return ret;
        }

        (*new_reftable_index)++;
    }

    status_cb(bs, (uint64_t)(index + 1) * s->refcount_table_size,
              (uint64_t)total * s->refcount_table_size, cb_opaque);

    return 0;
}

int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
                                BlockDriverAmendStatusCB *status_cb,
                                void *cb_opaque, Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2GetRefcountFunc *new_get_refcount;
    Qcow2SetRefcountFunc *new_set_refcount;
    void *new_refblock = qemu_blockalign(bs->file->bs, s->cluster_size);
    uint64_t *new_reftable = NULL, new_reftable_size = 0;
    uint64_t *old_reftable, old_reftable_size, old_reftable_offset;
    uint64_t new_reftable_index = 0;
    uint64_t i;
    int64_t new_reftable_offset = 0, allocated_reftable_size = 0;
    int new_refblock_size, new_refcount_bits = 1 << refcount_order;
    int old_refcount_order;
    int walk_index = 0;
    int ret;
    bool new_allocation;

    assert(s->qcow_version >= 3);
    assert(refcount_order >= 0 && refcount_order <= 6);

    /* see qcow2_open() */
    new_refblock_size = 1 << (s->cluster_bits - (refcount_order - 3));

    new_get_refcount = get_refcount_funcs[refcount_order];
    new_set_refcount = set_refcount_funcs[refcount_order];

    do {
        int total_walks;

        new_allocation = false;

        /* We have to do at least this walk and the one which writes the
         * refblocks; in addition, this loop normally runs at least twice
         * (first to do the allocations, then to verify that everything is
         * correctly allocated), which makes three walks in total */
        total_walks = MAX(walk_index + 2, 3);

        /* First, allocate the structures so they are present in the refcount
         * structures */
        ret = walk_over_reftable(bs, &new_reftable, &new_reftable_index,
                                 &new_reftable_size, NULL, new_refblock_size,
                                 new_refcount_bits, &alloc_refblock,
                                 &new_allocation, NULL, status_cb, cb_opaque,
                                 walk_index++, total_walks, errp);
        if (ret < 0) {
            goto done;
        }

        new_reftable_index = 0;

        if (new_allocation) {
            if (new_reftable_offset) {
                qcow2_free_clusters(bs, new_reftable_offset,
                                    allocated_reftable_size * sizeof(uint64_t),
                                    QCOW2_DISCARD_NEVER);
            }

            new_reftable_offset = qcow2_alloc_clusters(bs, new_reftable_size *
                                                           sizeof(uint64_t));
            if (new_reftable_offset < 0) {
                error_setg_errno(errp, -new_reftable_offset,
                                 "Failed to allocate the new reftable");
                ret = new_reftable_offset;
                goto done;
            }
            allocated_reftable_size = new_reftable_size;
        }
    } while (new_allocation);

    /* Second, write the new refblocks */
    ret = walk_over_reftable(bs, &new_reftable, &new_reftable_index,
                             &new_reftable_size, new_refblock,
                             new_refblock_size,
                             new_refcount_bits,
                             &flush_refblock, &new_allocation, new_set_refcount,
                             status_cb, cb_opaque, walk_index, walk_index + 1,
                             errp);
    if (ret < 0) {
        goto done;
    }
    assert(!new_allocation);

    /* Write the new reftable */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_reftable_offset,
                                        new_reftable_size * sizeof(uint64_t));
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Overlap check failed");
        goto done;
    }

    for (i = 0; i < new_reftable_size; i++) {
        cpu_to_be64s(&new_reftable[i]);
    }

    ret = bdrv_pwrite(bs->file, new_reftable_offset, new_reftable,
                      new_reftable_size * sizeof(uint64_t));

    for (i = 0; i < new_reftable_size; i++) {
        be64_to_cpus(&new_reftable[i]);
    }

    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to write the new reftable");
        goto done;
    }

    /* Empty the refcount cache */
    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to flush the refblock cache");
        goto done;
    }

    /* Update the image header to point to the new reftable; this only updates
     * the fields which are relevant to qcow2_update_header(); other fields
     * such as s->refcount_table or s->refcount_bits stay stale for now
     * (because we have to restore everything if qcow2_update_header() fails) */
    old_refcount_order  = s->refcount_order;
    old_reftable_size   = s->refcount_table_size;
    old_reftable_offset = s->refcount_table_offset;

    s->refcount_order        = refcount_order;
    s->refcount_table_size   = new_reftable_size;
    s->refcount_table_offset = new_reftable_offset;

    ret = qcow2_update_header(bs);
    if (ret < 0) {
        s->refcount_order        = old_refcount_order;
        s->refcount_table_size   = old_reftable_size;
        s->refcount_table_offset = old_reftable_offset;
        error_setg_errno(errp, -ret, "Failed to update the qcow2 header");
        goto done;
    }

    /* Now update the rest of the in-memory information */
    old_reftable = s->refcount_table;
    s->refcount_table = new_reftable;
    update_max_refcount_table_index(s);

    s->refcount_bits = 1 << refcount_order;
    s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1);
    s->refcount_max += s->refcount_max - 1;

    s->refcount_block_bits = s->cluster_bits - (refcount_order - 3);
    s->refcount_block_size = 1 << s->refcount_block_bits;

    s->get_refcount = new_get_refcount;
    s->set_refcount = new_set_refcount;

    /* For cleaning up all old refblocks and the old reftable below the "done"
     * label */
    new_reftable        = old_reftable;
    new_reftable_size   = old_reftable_size;
    new_reftable_offset = old_reftable_offset;

done:
    if (new_reftable) {
        /* On success, new_reftable actually points to the old reftable (and
         * new_reftable_size is the old reftable's size); but that is just
         * fine */
        for (i = 0; i < new_reftable_size; i++) {
            uint64_t offset = new_reftable[i] & REFT_OFFSET_MASK;
            if (offset) {
                qcow2_free_clusters(bs, offset, s->cluster_size,
                                    QCOW2_DISCARD_OTHER);
            }
        }
        g_free(new_reftable);

        if (new_reftable_offset > 0) {
            qcow2_free_clusters(bs, new_reftable_offset,
                                new_reftable_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    qemu_vfree(new_refblock);
    return ret;
}