/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <zlib.h>

#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "qemu/bswap.h"
#include "trace.h"

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = DIV_ROUND_UP(new_l1_size * 3, 2);
        }
    }

    QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
    if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs,
                                       align_offset(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, align_offset(new_l1_size2, 512));

    if (s->l1_size) {
        memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
    }

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
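    /* The new table is byte-swapped to big-endian in place for the write
     * below and swapped back afterwards, since the same buffer becomes the
     * live (CPU-order) L1 table. */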
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    stl_be_p(data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
 fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success, -errno if the read from the image file failed. On
 * success, *l2_table points to the loaded table.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
                   uint64_t **l2_table)
{
    BDRVQcow2State *s = bs->opaque;

    return qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                           (void **)l2_table);
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pwrite to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++)
    {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file,
                           s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}
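/*
 * Worked example for qcow2_write_l1_entry() above (illustrative numbers):
 * with 512-byte sectors, L1_ENTRIES_PER_SECTOR is 64, so l1_index == 70
 * yields l1_start_index == 64 and rewrites entries 64..127 in one 512-byte
 * bdrv_pwrite_sync().
 */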
/*
 * l2_allocate
 *
 * Allocates a new L2 table in the image file. If the L1 entry at l1_index
 * already points to an L2 table (i.e. we are doing copy-on-write for the
 * L2 table), the contents of the old L2 table are copied into the newly
 * allocated one. Otherwise the new table is initialized with zeros.
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table = NULL;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        goto fail;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
                              old_l2_offset & L1E_OFFSET_MASK,
                              (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        qcow2_cache_put(bs, s->l2_table_cache, (void **) &old_table);
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_table != NULL) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}
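/*
 * Typical call sequence (as in get_cluster_table() below): the caller first
 * tries l2_allocate() and, once the L1 entry points at the new copy, drops
 * the refcount of the old L2 table with qcow2_free_clusters().
 */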
/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster which may require a different handling)
 */
static int count_contiguous_clusters(int nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t stop_flags)
{
    int i;
    QCow2ClusterType first_cluster_type;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_table[0]);
    uint64_t offset = first_entry & mask;

    if (!offset) {
        return 0;
    }

    /* must be allocated */
    first_cluster_type = qcow2_get_cluster_type(first_entry);
    assert(first_cluster_type == QCOW2_CLUSTER_NORMAL ||
           first_cluster_type == QCOW2_CLUSTER_ZERO_ALLOC);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}

/*
 * Checks how many consecutive unallocated clusters in a given L2
 * table have the same cluster type.
 */
static int count_contiguous_clusters_unallocated(int nb_clusters,
                                                 uint64_t *l2_table,
                                                 QCow2ClusterType wanted_type)
{
    int i;

    assert(wanted_type == QCOW2_CLUSTER_ZERO_PLAIN ||
           wanted_type == QCOW2_CLUSTER_UNALLOCATED);
    for (i = 0; i < nb_clusters; i++) {
        uint64_t entry = be64_to_cpu(l2_table[i]);
        QCow2ClusterType type = qcow2_get_cluster_type(entry);

        if (type != wanted_type) {
            break;
        }
    }

    return i;
}

static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
                                            uint64_t src_cluster_offset,
                                            unsigned offset_in_cluster,
                                            QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    /* Call .bdrv_co_preadv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_preadv(bs, src_cluster_offset + offset_in_cluster,
                                  qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs,
                                                uint64_t src_cluster_offset,
                                                uint64_t cluster_offset,
                                                unsigned offset_in_cluster,
                                                uint8_t *buffer,
                                                unsigned bytes)
{
    if (bytes && bs->encrypted) {
        BDRVQcow2State *s = bs->opaque;
        int64_t sector = (s->crypt_physical_offset ?
                          (cluster_offset + offset_in_cluster) :
                          (src_cluster_offset + offset_in_cluster))
                         >> BDRV_SECTOR_BITS;
        assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0);
        assert((bytes & ~BDRV_SECTOR_MASK) == 0);
        assert(s->crypto);
        if (qcrypto_block_encrypt(s->crypto, sector, buffer,
                                  bytes, NULL) < 0) {
            return false;
        }
    }
    return true;
}

static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
                                             uint64_t cluster_offset,
                                             unsigned offset_in_cluster,
                                             QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + offset_in_cluster, qiov->size);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(bs->file, cluster_offset + offset_in_cluster,
                          qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}


/*
 * get_cluster_offset
 *
 * For a given offset of the virtual disk, find the cluster type and offset in
 * the qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *bytes is the maximum number of contiguous bytes starting at
 * offset that we are interested in.
 *
 * On exit, *bytes is the number of bytes starting at offset that have the same
 * cluster type and (if applicable) are stored contiguously in the image file.
 * Compressed clusters are always returned one by one.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             unsigned int *bytes, uint64_t *cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int offset_in_cluster;
    uint64_t bytes_available, bytes_needed, nb_clusters;
    QCow2ClusterType type;
    int ret;

    offset_in_cluster = offset_into_cluster(s, offset);
    bytes_needed = (uint64_t) *bytes + offset_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the start of the cluster
     * containing offset and the end of the l1 entry */
    bytes_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1))
                    + offset_in_cluster;

    if (bytes_needed > bytes_available) {
        bytes_needed = bytes_available;
    }

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_index(s, offset);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);

    nb_clusters = size_to_clusters(s, bytes_needed);
    /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
     * integers; the minimum cluster size is 512, so this assertion is always
     * true */
    assert(nb_clusters <= INT_MAX);

    type = qcow2_get_cluster_type(*cluster_offset);
    if (s->qcow_version < 3 && (type == QCOW2_CLUSTER_ZERO_PLAIN ||
                                type == QCOW2_CLUSTER_ZERO_ALLOC)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                " in pre-v3 image (L2 offset: %#" PRIx64
                                ", L2 index: %#x)", l2_offset, l2_index);
        ret = -EIO;
        goto fail;
    }
    switch (type) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO_PLAIN:
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters? */
        c = count_contiguous_clusters_unallocated(nb_clusters,
                                                  &l2_table[l2_index], type);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_ZERO_ALLOC:
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "Cluster allocation offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    bytes_available = (int64_t)c * s->cluster_size;

out:
    if (bytes_available > bytes_needed) {
        bytes_available = bytes_needed;
    }

    /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
     * subtracting offset_in_cluster will therefore definitely yield something
     * not exceeding UINT_MAX */
    assert(bytes_available - offset_in_cluster <= UINT_MAX);
    *bytes = bytes_available - offset_in_cluster;

    return type;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
    return ret;
}
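/*
 * Illustration of qcow2_get_cluster_offset() above (hypothetical numbers):
 * with 64 KiB clusters, a request of *bytes = 200000 at offset 0 that hits
 * three contiguous NORMAL clusters returns QCOW2_CLUSTER_NORMAL and shrinks
 * *bytes to 196608 (3 * 64 KiB).
 */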
/*
 * get_cluster_table
 *
 * For a given disk offset, load (and allocate if needed)
 * the L2 table.
 *
 * The L2 table offset in the qcow2 file and the cluster index
 * in the L2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_index(s, offset);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Returns the cluster offset if successful, and 0 otherwise.
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return cluster_offset;
}

static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2COWRegion *start = &m->cow_start;
    Qcow2COWRegion *end = &m->cow_end;
    unsigned buffer_size;
    unsigned data_bytes = end->offset - (start->offset + start->nb_bytes);
    bool merge_reads;
    uint8_t *start_buffer, *end_buffer;
    QEMUIOVector qiov;
    int ret;

    assert(start->nb_bytes <= UINT_MAX - end->nb_bytes);
    assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes);
    assert(start->offset + start->nb_bytes <= end->offset);
    assert(!m->data_qiov || m->data_qiov->size == data_bytes);

    if (start->nb_bytes == 0 && end->nb_bytes == 0) {
        return 0;
    }

    /* If we have to read both the start and end COW regions and the
     * middle region is not too large then perform just one read
     * operation */
    merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384;
    if (merge_reads) {
        buffer_size = start->nb_bytes + data_bytes + end->nb_bytes;
    } else {
        /* If we have to do two reads, add some padding in the middle
         * if necessary to make sure that the end region is optimally
         * aligned. */
        size_t align = bdrv_opt_mem_align(bs);
        assert(align > 0 && align <= UINT_MAX);
        assert(QEMU_ALIGN_UP(start->nb_bytes, align) <=
               UINT_MAX - end->nb_bytes);
        buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes;
    }

    /* Reserve a buffer large enough to store all the data that we're
     * going to read */
    start_buffer = qemu_try_blockalign(bs, buffer_size);
    if (start_buffer == NULL) {
        return -ENOMEM;
    }
    /* The part of the buffer where the end region is located */
    end_buffer = start_buffer + buffer_size - end->nb_bytes;

    qemu_iovec_init(&qiov, 2 + (m->data_qiov ? m->data_qiov->niov : 0));

    qemu_co_mutex_unlock(&s->lock);
    /* First we read the existing data from both COW regions. We
     * either read the whole region in one go, or the start and end
     * regions separately. */
    if (merge_reads) {
        qemu_iovec_add(&qiov, start_buffer, buffer_size);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
    } else {
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
    }
    if (ret < 0) {
        goto fail;
    }

    /* Encrypt the data if necessary before writing it */
    if (bs->encrypted) {
        if (!do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    start->offset, start_buffer,
                                    start->nb_bytes) ||
            !do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    end->offset, end_buffer, end->nb_bytes)) {
            ret = -EIO;
            goto fail;
        }
    }

    /* And now we can write everything. If we have the guest data we
     * can write everything in one single operation */
    if (m->data_qiov) {
        qemu_iovec_reset(&qiov);
        if (start->nb_bytes) {
            qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        }
        qemu_iovec_concat(&qiov, m->data_qiov, 0, data_bytes);
        if (end->nb_bytes) {
            qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        }
        /* NOTE: we have a write_aio blkdebug event here followed by
         * a cow_write one in do_perform_cow_write(), but there's only
         * one single I/O operation */
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
    } else {
        /* If there's no guest data then write both COW regions separately */
        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
    }

fail:
    qemu_co_mutex_lock(&s->lock);

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (ret == 0) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qemu_vfree(start_buffer);
    qemu_iovec_destroy(&qiov);
    return ret;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
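    /* With lazy refcounts, marking the image dirty first means that a crash
     * before the next flush leads to a refcount repair on the next open
     * rather than to leaked clusters. */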
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);

    assert(l2_index + m->nb_clusters <= s->l2_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes its data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by perform_cow()), update the L2 table with its cluster
         * pointer, and free the old cluster. This is what this loop does. */
        if (l2_table[l2_index + i] != 0) {
            old_cluster[j++] = l2_table[l2_index + i];
        }

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }


    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (!m->keep_old_clusters && j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must be copied from the backing file)
 */
static int count_cow_clusters(BDRVQcow2State *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);

        switch(cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO_PLAIN:
        case QCOW2_CLUSTER_ZERO_ALLOC:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}
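/*
 * For example, an entry run of [UNALLOCATED, ZERO_PLAIN, NORMAL|COPIED]
 * makes count_cow_clusters() return 2: the first two clusters need COW,
 * the third can be written to directly.
 */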
/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if an l2meta already exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}

/*
 * Checks how many already allocated clusters that don't require a copy on
 * write there are at the given guest_offset (up to *bytes). If
 * *host_offset is not zero, only physically contiguous clusters beginning at
 * this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *  -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    uint64_t nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#llx unaligned (guest offset: %#" PRIx64
                                    ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}
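/*
 * Example for handle_copied() above (hypothetical values): with 64 KiB
 * clusters, guest_offset 0x10200 hitting a COPIED normal cluster stored at
 * host offset 0x50000 returns 1 with *host_offset == 0x50200.
 */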
/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
                                   uint64_t *host_offset, uint64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}
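/*
 * Note that in the fixed-offset case above, qcow2_alloc_clusters_at() may
 * satisfy only part of the request: *nb_clusters shrinks to the number of
 * clusters that were actually free at *host_offset, possibly 0.
 */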
/*
 * Allocates new clusters for an area that is either still unallocated or
 * needs a copy on write. If *host_offset is non-zero, clusters are only
 * allocated if the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *  -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    uint64_t nb_clusters;
    int ret;
    bool keep_old_clusters = false;

    uint64_t alloc_cluster_offset = 0;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    if (qcow2_get_cluster_type(entry) == QCOW2_CLUSTER_ZERO_ALLOC &&
        (entry & QCOW_OFLAG_COPIED) &&
        (!*host_offset ||
         start_of_cluster(s, *host_offset) == (entry & L2E_OFFSET_MASK)))
    {
        /* Try to reuse preallocated zero clusters; contiguous normal clusters
         * would be fine, too, but count_cow_clusters() above has limited
         * nb_clusters already to a range of COW clusters */
        int preallocated_nb_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], QCOW_OFLAG_COPIED);
        assert(preallocated_nb_clusters > 0);

        nb_clusters = preallocated_nb_clusters;
        alloc_cluster_offset = entry & L2E_OFFSET_MASK;

        /* We want to reuse these clusters, so qcow2_alloc_cluster_link_l2()
         * should not free them. */
        keep_old_clusters = true;
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    if (!alloc_cluster_offset) {
        /* Allocate, if necessary at a given offset in the image file */
        alloc_cluster_offset = start_of_cluster(s, *host_offset);
        ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                      &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        /* Can't extend contiguous allocation */
        if (nb_clusters == 0) {
            *bytes = 0;
            return 0;
        }

        /* !*host_offset would overwrite the image header and is reserved for
         * "no host offset preferred". If 0 was a valid host offset, it'd
         * trigger the following overlap check; do that now to avoid having an
         * invalid value in *host_offset. */
        if (!alloc_cluster_offset) {
            ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
                                                nb_clusters * s->cluster_size);
            assert(ret < 0);
            goto fail;
        }
    }

    /*
     * Save info needed for metadata update.
     *
     * requested_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the (possibly shortened)
     * write request.
     *
     * avail_bytes: Number of bytes from the start of the first newly
     * allocated cluster to the end of the last newly allocated cluster.
     *
     * nb_bytes: The number of bytes from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
    int avail_bytes = MIN(INT_MAX, nb_clusters << s->cluster_bits);
    int nb_bytes = MIN(requested_bytes, avail_bytes);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,

        .keep_old_clusters = keep_old_clusters,

        .cow_start = {
            .offset     = 0,
            .nb_bytes   = offset_into_cluster(s, guest_offset),
        },
        .cow_end = {
            .offset     = nb_bytes,
            .nb_bytes   = avail_bytes - nb_bytes,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in the
 * qcow2 file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
                               unsigned int *bytes, uint64_t *host_offset,
                               QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes);

again:
    start = offset;
    remaining = *bytes;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start += cur_bytes;
        remaining -= cur_bytes;
        cluster_offset += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *    a) Overlap not in the first cluster -> shorten this request and
         *       let the caller handle the rest in its next loop iteration.
         *
         *    b) Real overlaps of two requests. Yield and restart the search
         *       for contiguous clusters (the situation could have changed
         *       while we were sleeping)
         *
         *    c) TODO: Request starts in the same cluster as the in-flight
         *       allocation ends. Shorten the COW of the in-flight allocation,
         *       set cluster_offset to write to the same cluster and set up
         *       the right synchronisation between the in-flight request and
         *       the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *bytes -= remaining;
    assert(*bytes > 0);
    assert(*host_offset != 0);

    return 0;
}

static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}
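/*
 * The negative windowBits value passed to inflateInit2() above selects raw
 * deflate data without a zlib header or trailer, which is how qcow2 stores
 * compressed clusters.
 */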
int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;

        /* Allocate buffers on the first decompress operation; most images
         * are uncompressed and the memory overhead can be avoided. The
         * buffers are freed in .bdrv_close(). */
        if (!s->cluster_data) {
            /* one more sector for decompressed data alignment */
            s->cluster_data = qemu_try_blockalign(bs->file->bs,
                    QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size + 512);
            if (!s->cluster_data) {
                return -ENOMEM;
            }
        }
        if (!s->cluster_cache) {
            s->cluster_cache = g_malloc(s->cluster_size);
        }

        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data,
                        nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}
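/*
 * Note that s->cluster_cache_offset acts as a one-entry cache: repeated
 * reads from the same compressed cluster skip both the file read and the
 * decompression above.
 */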
/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
                             uint64_t nb_clusters,
                             enum qcow2_discard_type type, bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry;

        old_l2_entry = be64_to_cpu(l2_table[l2_index + i]);

        /*
         * If full_discard is false, make sure that a discarded area reads back
         * as zeroes for v3 images (we cannot do it for v2 without actually
         * writing a zero-filled buffer). We can skip the operation if the
         * cluster is already marked as zero, or if it's unallocated and we
         * don't have a backing file.
         *
         * TODO We might want to use bdrv_get_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         *
         * If full_discard is true, the discarded area should not read back as
         * zeroes, but rather fall through to the backing file.
         */
        switch (qcow2_get_cluster_type(old_l2_entry)) {
        case QCOW2_CLUSTER_UNALLOCATED:
            if (full_discard || !bs->backing) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_PLAIN:
            if (!full_discard) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_ALLOC:
        case QCOW2_CLUSTER_NORMAL:
        case QCOW2_CLUSTER_COMPRESSED:
            break;

        default:
            abort();
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
        if (!full_discard && s->qcow_version >= 3) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
        } else {
            l2_table[l2_index + i] = cpu_to_be64(0);
        }

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return nb_clusters;
}

int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, enum qcow2_discard_type type,
                          bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        cleared = discard_single_l2(bs, offset, nb_clusters, type,
                                    full_discard);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
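/*
 * With the default 64 KiB clusters an L2 table holds 8192 entries, so each
 * iteration of the loop above can discard up to 512 MiB of guest data.
 */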
/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
                          uint64_t nb_clusters, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;
    bool unmap = !!(flags & BDRV_REQ_MAY_UNMAP);

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;
        QCow2ClusterType cluster_type;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /*
         * Minimize L2 changes if the cluster already reads back as
         * zeroes with correct allocation.
         */
        cluster_type = qcow2_get_cluster_type(old_offset);
        if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN ||
            (cluster_type == QCOW2_CLUSTER_ZERO_ALLOC && !unmap)) {
            continue;
        }

        qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
        if (cluster_type == QCOW2_CLUSTER_COMPRESSED || unmap) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return nb_clusters;
}

int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        cleared = zero_single_l2(bs, offset, nb_clusters, flags);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
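/*
 * To summarize zero_single_l2() above: plain zero clusters, and allocated
 * zero clusters when unmapping is not requested, are left untouched;
 * compressed clusters and unmap requests drop the allocation entirely;
 * everything else merely gains the QCOW_OFLAG_ZERO flag.
 */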
/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * l1_entries and *visited_l1_entries are used to keep track of progress for
 * status_cb(). l1_entries contains the total number of L1 entries and
 * *visited_l1_entries counts all visited L1 entries.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, int64_t *visited_l1_entries,
                                      int64_t l1_entries,
                                      BlockDriverAmendStatusCB *status_cb,
                                      void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_table = NULL;
    int ret;
    int i, j;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_table = qemu_try_blockalign(bs->file->bs, s->cluster_size);
        if (l2_table == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        bool l2_dirty = false;
        uint64_t l2_refcount;

        if (!l2_offset) {
            /* unallocated */
            (*visited_l1_entries)++;
            if (status_cb) {
                status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
            }
            continue;
        }

        if (offset_into_cluster(s, l2_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                    PRIx64 " unaligned (L1 index: %#x)",
                                    l2_offset, i);
            ret = -EIO;
            goto fail;
        }

        if (is_active_l1) {
            /* get active L2 tables from cache */
            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                                  (void **)&l2_table);
        } else {
            /* load inactive L2 tables from disk */
            ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                            (void *)l2_table, s->cluster_sectors);
        }
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &l2_refcount);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            int64_t offset = l2_entry & L2E_OFFSET_MASK;
            QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);

            if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN &&
                cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) {
                continue;
            }

            if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                if (!bs->backing) {
                    /* not backed; therefore we can simply deallocate the
                     * cluster */
                    l2_table[j] = 0;
                    l2_dirty = true;
                    continue;
                }

                offset = qcow2_alloc_clusters(bs, s->cluster_size);
                if (offset < 0) {
                    ret = offset;
                    goto fail;
                }

                if (l2_refcount > 1) {
                    /* For shared L2 tables, set the refcount accordingly (it is
                     * already 1 and needs to be l2_refcount) */
                    ret = qcow2_update_cluster_refcount(bs,
                            offset >> s->cluster_bits,
                            refcount_diff(1, l2_refcount), false,
                            QCOW2_DISCARD_OTHER);
                    if (ret < 0) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_OTHER);
                        goto fail;
                    }
                }
            }

            if (offset_into_cluster(s, offset)) {
                qcow2_signal_corruption(bs, true, -1, -1,
                                        "Cluster allocation offset "
                                        "%#" PRIx64 " unaligned (L2 offset: %#"
                                        PRIx64 ", L2 index: %#x)", offset,
                                        l2_offset, j);
                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                ret = -EIO;
                goto fail;
            }

            ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
            if (ret < 0) {
                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            ret = bdrv_pwrite_zeroes(bs->file, offset, s->cluster_size, 0);
            if (ret < 0) {
                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            if (l2_refcount == 1) {
                l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
            } else {
                l2_table[j] = cpu_to_be64(offset);
            }
            l2_dirty = true;
        }

        if (is_active_l1) {
            if (l2_dirty) {
                qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
                qcow2_cache_depends_on_flush(s->l2_table_cache);
            }
            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        } else {
            if (l2_dirty) {
                ret = qcow2_pre_write_overlap_check(bs,
                        QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
                        s->cluster_size);
                if (ret < 0) {
                    goto fail;
                }

                ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                                 (void *)l2_table, s->cluster_sectors);
                if (ret < 0) {
                    goto fail;
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
        }
    }

    ret = 0;

fail:
    if (l2_table) {
        if (!is_active_l1) {
            qemu_vfree(l2_table);
        } else {
            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        }
    }
    return ret;
}
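/*
 * Note that inactive L2 tables are read and written directly with
 * bdrv_read()/bdrv_write() above, bypassing the L2 table cache; this is why
 * qcow2_expand_zero_clusters() below must empty that cache before touching
 * tables reachable from inactive L1 tables.
 */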
*/ 1998 ret = qcow2_cache_empty(bs, s->l2_table_cache); 1999 if (ret < 0) { 2000 goto fail; 2001 } 2002 2003 for (i = 0; i < s->nb_snapshots; i++) { 2004 int l1_sectors = DIV_ROUND_UP(s->snapshots[i].l1_size * 2005 sizeof(uint64_t), BDRV_SECTOR_SIZE); 2006 2007 l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE); 2008 2009 ret = bdrv_read(bs->file, 2010 s->snapshots[i].l1_table_offset / BDRV_SECTOR_SIZE, 2011 (void *)l1_table, l1_sectors); 2012 if (ret < 0) { 2013 goto fail; 2014 } 2015 2016 for (j = 0; j < s->snapshots[i].l1_size; j++) { 2017 be64_to_cpus(&l1_table[j]); 2018 } 2019 2020 ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size, 2021 &visited_l1_entries, l1_entries, 2022 status_cb, cb_opaque); 2023 if (ret < 0) { 2024 goto fail; 2025 } 2026 } 2027 2028 ret = 0; 2029 2030 fail: 2031 g_free(l1_table); 2032 return ret; 2033 } 2034