/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <zlib.h>

#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block_int.h"
#include "qcow2.h"
#include "qemu/bswap.h"
#include "trace.h"

int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size, i, ret;

    if (exact_size >= s->l1_size) {
        return 0;
    }

    new_l1_size = exact_size;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
    ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset +
                                       new_l1_size * sizeof(uint64_t),
                             (s->l1_size - new_l1_size) * sizeof(uint64_t), 0);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_flush(bs->file->bs);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
    for (i = s->l1_size - 1; i > new_l1_size - 1; i--) {
        if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) {
            continue;
        }
        qcow2_free_clusters(bs, s->l1_table[i] & L1E_OFFSET_MASK,
                            s->cluster_size, QCOW2_DISCARD_ALWAYS);
        s->l1_table[i] = 0;
    }
    return 0;

fail:
    /*
     * If writing the L1 table failed, the image may contain a partially
     * overwritten l1_table. In this case it would be better to clear the
     * l1_table in memory to avoid possible image corruption.
     */
    memset(s->l1_table + new_l1_size, 0,
           (s->l1_size - new_l1_size) * sizeof(uint64_t));
    return ret;
}
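
/*
 * Note the ordering above: the tail of the on-disk L1 table is zeroed and
 * flushed *before* the L2 clusters it referenced are freed and the
 * in-memory entries are cleared. A crash in between can therefore leak
 * clusters, but never leave L1 entries pointing at freed L2 tables.
 * For illustration (values assumed, not taken from a real image):
 * shrinking from l1_size = 8 to exact_size = 2 zeroes 6 * 8 = 48 bytes on
 * disk and then frees up to six L2 tables of cluster_size bytes each.
 */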

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = DIV_ROUND_UP(new_l1_size * 3, 2);
        }
    }

    QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
    if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs,
                                       ROUND_UP(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, ROUND_UP(new_l1_size2, 512));

    if (s->l1_size) {
        memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
    }

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2, false);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    stl_be_p(data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}
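
/*
 * Illustration of the non-exact growth policy above (assumed numbers, not
 * from a real image): starting from l1_size = 4 with min_size = 10,
 * new_l1_size progresses 4 -> 6 -> 9 -> 14, i.e. each step multiplies by
 * 1.5 (rounding up), so repeated small extensions of the image cause only
 * O(log n) L1 rewrites rather than one per extension.
 */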

/*
 * l2_load
 *
 * @bs: The BlockDriverState
 * @offset: A guest offset, used to calculate what slice of the L2
 *          table to load.
 * @l2_offset: Offset to the L2 table in the image file.
 * @l2_slice: Location to store the pointer to the L2 slice.
 *
 * Loads an L2 slice into memory (L2 slices are the parts of L2 tables
 * that are loaded by the qcow2 cache). If the slice is in the cache,
 * the cache is used; otherwise the L2 slice is loaded from the image
 * file.
 */
static int l2_load(BlockDriverState *bs, uint64_t offset,
                   uint64_t l2_offset, uint64_t **l2_slice)
{
    BDRVQcow2State *s = bs->opaque;
    int start_of_slice = sizeof(uint64_t) *
        (offset_to_l2_index(s, offset) - offset_to_l2_slice_index(s, offset));

    return qcow2_cache_get(bs, s->l2_table_cache, l2_offset + start_of_slice,
                           (void **)l2_slice);
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pwrite to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++)
    {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf), false);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file,
                           s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}
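
/*
 * Worked example for the sector rounding above (hypothetical index, for
 * illustration only): with L1_ENTRIES_PER_SECTOR = 64, an update of
 * l1_index = 100 yields l1_start_index = 100 & ~63 = 64, so entries
 * 64..127 (one full 512-byte sector) are written back in one request.
 */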

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 */
static int l2_allocate(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    /* The offset must fit in the offset field of the L1 table entry */
    assert((l2_offset & L1E_OFFSET_MASK) == l2_offset);

    /* If we're allocating the table at offset 0 then something is wrong */
    if (l2_offset == 0) {
        qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid "
                                "allocation of L2 table at offset 0");
        ret = -EIO;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
    n_slices = s->cluster_size / slice_size2;

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    for (slice = 0; slice < n_slices; slice++) {
        ret = qcow2_cache_get_empty(bs, s->l2_table_cache,
                                    l2_offset + slice * slice_size2,
                                    (void **) &l2_slice);
        if (ret < 0) {
            goto fail;
        }

        if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
            /* if there was no old l2 table, clear the new slice */
            memset(l2_slice, 0, slice_size2);
        } else {
            uint64_t *old_slice;
            uint64_t old_l2_slice_offset =
                (old_l2_offset & L1E_OFFSET_MASK) + slice * slice_size2;

            /* if there was an old l2 table, read a slice from the disk */
            BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
            ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_slice_offset,
                                  (void **) &old_slice);
            if (ret < 0) {
                goto fail;
            }

            memcpy(l2_slice, old_slice, slice_size2);

            qcow2_cache_put(s->l2_table_cache, (void **) &old_slice);
        }

        /* write the l2 slice to the file */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

        trace_qcow2_l2_allocate_write_l2(bs, l1_index);
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }

    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_slice != NULL) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}
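
/*
 * Slice sizing illustration for the loop above (assumed configuration,
 * not a fixed default): with a 64 KiB cluster and an L2 cache entry size
 * of 4 KiB, slice_size2 = 4096 and n_slices = 16, so the new L2 table is
 * initialized (or copied) in 16 independent cache entries. With the cache
 * entry size equal to the cluster size there is exactly one slice.
 */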

/*
 * Checks how many clusters in a given L2 slice are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster, which may require different handling.)
 */
static int count_contiguous_clusters(BlockDriverState *bs, int nb_clusters,
        int cluster_size, uint64_t *l2_slice, uint64_t stop_flags)
{
    int i;
    QCow2ClusterType first_cluster_type;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_slice[0]);
    uint64_t offset = first_entry & mask;

    first_cluster_type = qcow2_get_cluster_type(bs, first_entry);
    if (first_cluster_type == QCOW2_CLUSTER_UNALLOCATED) {
        return 0;
    }

    /* must be allocated */
    assert(first_cluster_type == QCOW2_CLUSTER_NORMAL ||
           first_cluster_type == QCOW2_CLUSTER_ZERO_ALLOC);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_slice[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}

/*
 * Checks how many consecutive unallocated clusters in a given L2
 * slice have the same cluster type.
 */
static int count_contiguous_clusters_unallocated(BlockDriverState *bs,
                                                 int nb_clusters,
                                                 uint64_t *l2_slice,
                                                 QCow2ClusterType wanted_type)
{
    int i;

    assert(wanted_type == QCOW2_CLUSTER_ZERO_PLAIN ||
           wanted_type == QCOW2_CLUSTER_UNALLOCATED);
    for (i = 0; i < nb_clusters; i++) {
        uint64_t entry = be64_to_cpu(l2_slice[i]);
        QCow2ClusterType type = qcow2_get_cluster_type(bs, entry);

        if (type != wanted_type) {
            break;
        }
    }

    return i;
}
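
/*
 * Example of the mask logic in count_contiguous_clusters() (hypothetical
 * entries): with stop_flags = QCOW_OFLAG_ZERO, the mask covers the zero
 * flag, the compressed flag and the host offset bits. Three entries
 * pointing at host offsets 0x50000, 0x60000 and 0x70000 (64 KiB clusters,
 * identical flags) count as 3 contiguous clusters; a fourth entry at
 * 0x90000, or one whose masked flags differ, stops the count there.
 */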

static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
                                            uint64_t src_cluster_offset,
                                            unsigned offset_in_cluster,
                                            QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    /* Call .bdrv_co_preadv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_preadv(bs, src_cluster_offset + offset_in_cluster,
                                  qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs,
                                                uint64_t src_cluster_offset,
                                                uint64_t cluster_offset,
                                                unsigned offset_in_cluster,
                                                uint8_t *buffer,
                                                unsigned bytes)
{
    if (bytes && bs->encrypted) {
        BDRVQcow2State *s = bs->opaque;
        int64_t offset = (s->crypt_physical_offset ?
                          (cluster_offset + offset_in_cluster) :
                          (src_cluster_offset + offset_in_cluster));
        assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0);
        assert((bytes & ~BDRV_SECTOR_MASK) == 0);
        assert(s->crypto);
        if (qcrypto_block_encrypt(s->crypto, offset, buffer, bytes, NULL) < 0) {
            return false;
        }
    }
    return true;
}

static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
                                             uint64_t cluster_offset,
                                             unsigned offset_in_cluster,
                                             QEMUIOVector *qiov)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + offset_in_cluster, qiov->size, true);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(s->data_file, cluster_offset + offset_in_cluster,
                          qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
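
/*
 * A note on the IV offset chosen in do_perform_cow_encrypt() (based on how
 * the crypto layer is wired up elsewhere in this driver): when
 * s->crypt_physical_offset is true (e.g. LUKS) the IV is derived from the
 * host cluster offset, while the legacy encryption mode derives it from
 * the guest offset, hence the use of src_cluster_offset in that branch.
 */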

/*
 * get_cluster_offset
 *
 * For a given offset of the virtual disk, find the cluster type and offset in
 * the qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *bytes is the maximum number of contiguous bytes starting at
 * offset that we are interested in.
 *
 * On exit, *bytes is the number of bytes starting at offset that have the same
 * cluster type and (if applicable) are stored contiguously in the image file.
 * Compressed clusters are always returned one by one.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             unsigned int *bytes, uint64_t *cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_slice;
    int c;
    unsigned int offset_in_cluster;
    uint64_t bytes_available, bytes_needed, nb_clusters;
    QCow2ClusterType type;
    int ret;

    offset_in_cluster = offset_into_cluster(s, offset);
    bytes_needed = (uint64_t) *bytes + offset_in_cluster;

    /* compute how many bytes there are between the start of the cluster
     * containing offset and the end of the l2 slice that contains
     * the entry pointing to it */
    bytes_available =
        ((uint64_t) (s->l2_slice_size - offset_to_l2_slice_index(s, offset)))
        << s->cluster_bits;

    if (bytes_needed > bytes_available) {
        bytes_needed = bytes_available;
    }

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 slice in memory */

    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);
    *cluster_offset = be64_to_cpu(l2_slice[l2_index]);

    nb_clusters = size_to_clusters(s, bytes_needed);
    /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
     * integers; the minimum cluster size is 512, so this assertion is always
     * true */
    assert(nb_clusters <= INT_MAX);

    type = qcow2_get_cluster_type(bs, *cluster_offset);
    if (s->qcow_version < 3 && (type == QCOW2_CLUSTER_ZERO_PLAIN ||
                                type == QCOW2_CLUSTER_ZERO_ALLOC)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                " in pre-v3 image (L2 offset: %#" PRIx64
                                ", L2 index: %#x)", l2_offset, l2_index);
        ret = -EIO;
        goto fail;
    }
    switch (type) {
    case QCOW2_CLUSTER_COMPRESSED:
        if (has_data_file(bs)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Compressed cluster "
                                    "entry found in image with external data "
                                    "file (L2 offset: %#" PRIx64 ", L2 index: "
                                    "%#x)", l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO_PLAIN:
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_clusters_unallocated(bs, nb_clusters,
                                                  &l2_slice[l2_index], type);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_ZERO_ALLOC:
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "Cluster allocation offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        if (has_data_file(bs) && *cluster_offset != offset - offset_in_cluster)
        {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "External data file host cluster offset %#"
                                    PRIx64 " does not match guest cluster "
                                    "offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    offset - offset_in_cluster, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    bytes_available = (int64_t)c * s->cluster_size;

out:
    if (bytes_available > bytes_needed) {
        bytes_available = bytes_needed;
    }

    /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
     * subtracting offset_in_cluster will therefore definitely yield something
     * not exceeding UINT_MAX */
    assert(bytes_available - offset_in_cluster <= UINT_MAX);
    *bytes = bytes_available - offset_in_cluster;

    return type;

fail:
    qcow2_cache_put(s->l2_table_cache, (void **)&l2_slice);
    return ret;
}
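
/*
 * Usage sketch for qcow2_get_cluster_offset() (illustrative only;
 * variable names invented and error handling omitted):
 *
 *     unsigned int bytes = 128 * 1024;
 *     uint64_t host_offset;
 *     int type = qcow2_get_cluster_offset(bs, guest_offset, &bytes,
 *                                         &host_offset);
 *
 * On return, type is a QCOW2_CLUSTER_* value (or -errno) and bytes has
 * been clamped to the longest run starting at guest_offset that shares
 * this type, never crossing an L2 slice boundary.
 */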

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the appropriate slice of its l2 table.
 *
 * the cluster index in the l2 slice is given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_slice,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_slice = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    if (!(s->l1_table[l1_index] & QCOW_OFLAG_COPIED)) {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }

        /* Get the offset of the newly-allocated l2 table */
        l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
        assert(offset_into_cluster(s, l2_offset) == 0);
    }

    /* load the l2 slice in memory */
    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);

    *new_l2_slice = l2_slice;
    *new_l2_index = l2_index;

    return 0;
}
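
/*
 * Callers of get_cluster_table() receive a reference into the L2 table
 * cache and must release it with qcow2_cache_put() once they are done with
 * the slice (every user in this file follows that pattern). Sketch, with
 * made-up variable names and no error handling:
 *
 *     uint64_t *l2_slice;
 *     int l2_index;
 *     get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
 *     ... read or update l2_slice[l2_index] ...
 *     qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
 */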

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset on the virtual disk, allocate a new compressed cluster
 * and put the host offset of the cluster into *host_offset. If a cluster is
 * already allocated at the offset, return an error.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                          uint64_t offset,
                                          int compressed_size,
                                          uint64_t *host_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_slice;
    int64_t cluster_offset;
    int nb_csectors;

    if (has_data_file(bs)) {
        return 0;
    }

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_slice[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return -EIO;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return cluster_offset;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
    l2_slice[l2_index] = cpu_to_be64(cluster_offset);
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    *host_offset = cluster_offset & s->cluster_offset_mask;
    return 0;
}
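
/*
 * Worked example for the compressed L2 entry built above (all numbers
 * invented for illustration): with cluster_offset = 0x50200 and
 * compressed_size = 1100, the data occupies the byte range
 * [0x50200, 0x5064c), touching sectors 0x281..0x283, so nb_csectors =
 * 0x283 - 0x281 = 2. The sector containing the start is implicit, which
 * is why the difference rather than the full sector count is stored next
 * to the byte offset and QCOW_OFLAG_COMPRESSED.
 */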

static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2COWRegion *start = &m->cow_start;
    Qcow2COWRegion *end = &m->cow_end;
    unsigned buffer_size;
    unsigned data_bytes = end->offset - (start->offset + start->nb_bytes);
    bool merge_reads;
    uint8_t *start_buffer, *end_buffer;
    QEMUIOVector qiov;
    int ret;

    assert(start->nb_bytes <= UINT_MAX - end->nb_bytes);
    assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes);
    assert(start->offset + start->nb_bytes <= end->offset);
    assert(!m->data_qiov || m->data_qiov->size == data_bytes);

    if (start->nb_bytes == 0 && end->nb_bytes == 0) {
        return 0;
    }

    /* If we have to read both the start and end COW regions and the
     * middle region is not too large then perform just one read
     * operation */
    merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384;
    if (merge_reads) {
        buffer_size = start->nb_bytes + data_bytes + end->nb_bytes;
    } else {
        /* If we have to do two reads, add some padding in the middle
         * if necessary to make sure that the end region is optimally
         * aligned. */
        size_t align = bdrv_opt_mem_align(bs);
        assert(align > 0 && align <= UINT_MAX);
        assert(QEMU_ALIGN_UP(start->nb_bytes, align) <=
               UINT_MAX - end->nb_bytes);
        buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes;
    }

    /* Reserve a buffer large enough to store all the data that we're
     * going to read */
    start_buffer = qemu_try_blockalign(bs, buffer_size);
    if (start_buffer == NULL) {
        return -ENOMEM;
    }
    /* The part of the buffer where the end region is located */
    end_buffer = start_buffer + buffer_size - end->nb_bytes;

    qemu_iovec_init(&qiov, 2 + (m->data_qiov ? m->data_qiov->niov : 0));

    qemu_co_mutex_unlock(&s->lock);
    /* First we read the existing data from both COW regions. We
     * either read the whole region in one go, or the start and end
     * regions separately. */
    if (merge_reads) {
        qemu_iovec_add(&qiov, start_buffer, buffer_size);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
    } else {
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
    }
    if (ret < 0) {
        goto fail;
    }

    /* Encrypt the data if necessary before writing it */
    if (bs->encrypted) {
        if (!do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    start->offset, start_buffer,
                                    start->nb_bytes) ||
            !do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    end->offset, end_buffer, end->nb_bytes)) {
            ret = -EIO;
            goto fail;
        }
    }

    /* And now we can write everything. If we have the guest data we
     * can write everything in one single operation */
    if (m->data_qiov) {
        qemu_iovec_reset(&qiov);
        if (start->nb_bytes) {
            qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        }
        qemu_iovec_concat(&qiov, m->data_qiov, 0, data_bytes);
        if (end->nb_bytes) {
            qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        }
        /* NOTE: we have a write_aio blkdebug event here followed by
         * a cow_write one in do_perform_cow_write(), but there's only
         * a single I/O operation */
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
    } else {
        /* If there's no guest data then write both COW regions separately */
        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
    }

fail:
    qemu_co_mutex_lock(&s->lock);

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (ret == 0) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qemu_vfree(start_buffer);
    qemu_iovec_destroy(&qiov);
    return ret;
}
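
/*
 * Layout handled by perform_cow() above, for one allocated run of clusters
 * (offsets relative to the start of the first newly allocated cluster):
 *
 *   |<-- cow_start -->|<------ guest data ------>|<-- cow_end -->|
 *   0                 start->nb_bytes            end->offset     end
 *
 * cow_start covers the bytes in front of the write request and cow_end the
 * bytes behind it, both copied from the old location (or the backing
 * file). With m->data_qiov set, all three pieces go out in one write.
 */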

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_slice;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_slice, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);

    assert(l2_index + m->nb_clusters <= s->l2_slice_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes its data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by perform_cow()), update the L2 table with its own
         * cluster pointer and free the old cluster. This is what this loop
         * does */
        if (l2_slice[l2_index + i] != 0) {
            old_cluster[j++] = l2_slice[l2_index + i];
        }

        l2_slice[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }


    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (!m->keep_old_clusters && j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/**
 * Frees the allocated clusters because the request failed and they won't
 * actually be linked.
 */
void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    qcow2_free_clusters(bs, m->alloc_offset, m->nb_clusters << s->cluster_bits,
                        QCOW2_DISCARD_NEVER);
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must copy from the backing file)
 */
static int count_cow_clusters(BlockDriverState *bs, int nb_clusters,
    uint64_t *l2_slice, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_slice[l2_index + i]);
        QCow2ClusterType cluster_type = qcow2_get_cluster_type(bs, l2_entry);

        switch(cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO_PLAIN:
        case QCOW2_CLUSTER_ZERO_ALLOC:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if an l2meta already exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}
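
/*
 * Overlap illustration for handle_dependencies() (all offsets made up):
 * with an in-flight allocation covering [0x30000, 0x50000) and a new
 * request for [0x20000, 0x60000), start < old_start, so the request is
 * shortened to bytes = 0x30000 - 0x20000 = 0x10000 and the caller handles
 * the remainder in its next gathering iteration. A request starting inside
 * [0x30000, 0x50000) instead waits on dependent_requests and gets -EAGAIN.
 */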

/*
 * Checks how many already allocated clusters that don't require a copy on
 * write there are at the given guest_offset (up to *bytes). If *host_offset is
 * not INV_OFFSET, only physically contiguous clusters beginning at this host
 * offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *   -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_slice;
    uint64_t nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == INV_OFFSET || offset_into_cluster(s, guest_offset)
                                      == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 slice
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_slice_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_slice[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(bs, cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#llx unaligned (guest offset: %#" PRIx64
                                    ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != INV_OFFSET && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}
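
/*
 * Clamping illustration for handle_copied() (assumed 64 KiB clusters,
 * made-up offsets): for guest_offset = 0x10200 and *bytes = 0x20000, if
 * only one COPIED cluster is found, *bytes becomes
 * MIN(0x20000, 1 * 0x10000 - 0x200) = 0xfe00, and *host_offset is the
 * cluster's host offset plus the same 0x200 intra-cluster offset.
 */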

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is not INV_OFFSET, it specifies the offset in the image file
 * at which the new clusters must start. *nb_clusters can be 0 on return in
 * this case if the cluster at host_offset is already in use. If *host_offset
 * is INV_OFFSET, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
                                   uint64_t *host_offset, uint64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    if (has_data_file(bs)) {
        assert(*host_offset == INV_OFFSET ||
               *host_offset == start_of_cluster(s, guest_offset));
        *host_offset = start_of_cluster(s, guest_offset);
        return 0;
    }

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == INV_OFFSET) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}
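
/*
 * The two branches above implement the two allocation modes: with
 * *host_offset == INV_OFFSET the refcount allocator is free to place the
 * run anywhere (it returns the new host offset); otherwise
 * qcow2_alloc_clusters_at() tries to extend an existing run at a fixed
 * host offset and may allocate fewer clusters than requested (possibly
 * zero), which the caller must cope with. With an external data file the
 * guest/host mapping is always 1:1, so no allocation is needed at all.
 */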

/*
 * Allocates new clusters for an area that is either still unallocated or
 * needs a copy on write. If *host_offset is not INV_OFFSET, clusters are
 * only allocated if the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *   -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t *l2_slice;
    uint64_t entry;
    uint64_t nb_clusters;
    int ret;
    bool keep_old_clusters = false;

    uint64_t alloc_cluster_offset = INV_OFFSET;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 slice
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_slice_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_slice[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(bs, nb_clusters, l2_slice, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    if (qcow2_get_cluster_type(bs, entry) == QCOW2_CLUSTER_ZERO_ALLOC &&
        (entry & QCOW_OFLAG_COPIED) &&
        (*host_offset == INV_OFFSET ||
         start_of_cluster(s, *host_offset) == (entry & L2E_OFFSET_MASK)))
    {
        int preallocated_nb_clusters;

        if (offset_into_cluster(s, entry & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Preallocated zero "
                                    "cluster offset %#llx unaligned (guest "
                                    "offset: %#" PRIx64 ")",
                                    entry & L2E_OFFSET_MASK, guest_offset);
            ret = -EIO;
            goto fail;
        }

        /* Try to reuse preallocated zero clusters; contiguous normal clusters
         * would be fine, too, but count_cow_clusters() above has limited
         * nb_clusters already to a range of COW clusters */
        preallocated_nb_clusters =
            count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index], QCOW_OFLAG_COPIED);
        assert(preallocated_nb_clusters > 0);

        nb_clusters = preallocated_nb_clusters;
        alloc_cluster_offset = entry & L2E_OFFSET_MASK;

        /* We want to reuse these clusters, so qcow2_alloc_cluster_link_l2()
         * should not free them. */
        keep_old_clusters = true;
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    if (alloc_cluster_offset == INV_OFFSET) {
        /* Allocate, if necessary at a given offset in the image file */
        alloc_cluster_offset = *host_offset == INV_OFFSET ? INV_OFFSET :
                               start_of_cluster(s, *host_offset);
        ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                      &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        /* Can't extend contiguous allocation */
        if (nb_clusters == 0) {
            *bytes = 0;
            return 0;
        }

        assert(alloc_cluster_offset != INV_OFFSET);
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the (possibly already
     * shortened) write request.
     *
     * avail_bytes: Number of bytes from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_bytes: The number of bytes from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
    int avail_bytes = MIN(INT_MAX, nb_clusters << s->cluster_bits);
    int nb_bytes = MIN(requested_bytes, avail_bytes);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,

        .keep_old_clusters = keep_old_clusters,

        .cow_start = {
            .offset     = 0,
            .nb_bytes   = offset_into_cluster(s, guest_offset),
        },
        .cow_end = {
            .offset     = nb_bytes,
            .nb_bytes   = avail_bytes - nb_bytes,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}
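
/*
 * Numeric example for the QCowL2Meta set up above (64 KiB clusters and all
 * offsets invented): a write of *bytes = 0x18000 at guest_offset = 0x10800
 * that got two new clusters yields requested_bytes = 0x18800, avail_bytes
 * = 0x20000 and nb_bytes = 0x18800. Hence cow_start covers [0, 0x800) and
 * cow_end covers [0x18800, 0x20000) relative to the first new cluster, and
 * *bytes stays 0x18000.
 */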

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in the
 * qcow2 file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
                               unsigned int *bytes, uint64_t *host_offset,
                               QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes);

again:
    start = offset;
    remaining = *bytes;
    cluster_offset = INV_OFFSET;
    *host_offset = INV_OFFSET;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (*host_offset == INV_OFFSET && cluster_offset != INV_OFFSET) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start += cur_bytes;
        remaining -= cur_bytes;

        if (cluster_offset != INV_OFFSET) {
            cluster_offset += cur_bytes;
        }

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight
         *         allocation, set cluster_offset to write to the same cluster
         *         and set up the right synchronisation between the in-flight
         *         request and the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *bytes -= remaining;
    assert(*bytes > 0);
    assert(*host_offset != INV_OFFSET);

    return 0;
}
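
/*
 * Typical caller pattern (a simplified sketch of what the qcow2 write path
 * does; names abbreviated, error handling omitted):
 *
 *     while (bytes != 0) {
 *         unsigned int cur_bytes = bytes;
 *         qcow2_alloc_cluster_offset(bs, offset, &cur_bytes,
 *                                    &host_offset, &l2meta);
 *         ... write cur_bytes of guest data at host_offset ...
 *         ... call qcow2_alloc_cluster_link_l2() for each gathered
 *             l2meta, then wake its dependent_requests ...
 *         offset += cur_bytes;
 *         bytes -= cur_bytes;
 *     }
 */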

/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 slice) and returns the number of discarded
 * clusters.
 */
static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset,
                               uint64_t nb_clusters,
                               enum qcow2_discard_type type, bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_slice;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 slice */
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry;

        old_l2_entry = be64_to_cpu(l2_slice[l2_index + i]);

        /*
         * If full_discard is false, make sure that a discarded area reads back
         * as zeroes for v3 images (we cannot do it for v2 without actually
         * writing a zero-filled buffer). We can skip the operation if the
         * cluster is already marked as zero, or if it's unallocated and we
         * don't have a backing file.
         *
         * TODO We might want to use bdrv_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         *
         * If full_discard is true, the sector should not read back as zeroes,
         * but rather fall through to the backing file.
         */
        switch (qcow2_get_cluster_type(bs, old_l2_entry)) {
        case QCOW2_CLUSTER_UNALLOCATED:
            if (full_discard || !bs->backing) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_PLAIN:
            if (!full_discard) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_ALLOC:
        case QCOW2_CLUSTER_NORMAL:
        case QCOW2_CLUSTER_COMPRESSED:
            break;

        default:
            abort();
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        if (!full_discard && s->qcow_version >= 3) {
            l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
        } else {
            l2_slice[l2_index + i] = cpu_to_be64(0);
        }

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    return nb_clusters;
}

int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, enum qcow2_discard_type type,
                          bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    /* Each L2 slice is handled by its own loop iteration */
    while (nb_clusters > 0) {
        cleared = discard_in_l2_slice(bs, offset, nb_clusters, type,
                                      full_discard);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
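
/*
 * Slice-loop illustration (assumed geometry: 64 KiB clusters,
 * l2_slice_size = 8192 entries, so one slice maps 512 MiB): an aligned
 * 1 GiB discard covers 16384 clusters and therefore takes two iterations
 * of the loop above, each call to discard_in_l2_slice() clearing at most
 * one slice's worth of entries. An unaligned start simply makes the first
 * iteration shorter.
 */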

/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 slice) and returns the number of zeroed
 * clusters.
 */
static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
                            uint64_t nb_clusters, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_slice;
    int l2_index;
    int ret;
    int i;
    bool unmap = !!(flags & BDRV_REQ_MAY_UNMAP);

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 slice */
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;
        QCow2ClusterType cluster_type;

        old_offset = be64_to_cpu(l2_slice[l2_index + i]);

        /*
         * Minimize L2 changes if the cluster already reads back as
         * zeroes with correct allocation.
         */
        cluster_type = qcow2_get_cluster_type(bs, old_offset);
        if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN ||
            (cluster_type == QCOW2_CLUSTER_ZERO_ALLOC && !unmap)) {
            continue;
        }

        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        if (cluster_type == QCOW2_CLUSTER_COMPRESSED || unmap) {
            l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_slice[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    return nb_clusters;
}

int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* If we have to stay in sync with an external data file, zero out
     * s->data_file first. */
    if (data_file_is_raw(bs)) {
        assert(has_data_file(bs));
        ret = bdrv_co_pwrite_zeroes(s->data_file, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
    }

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 slice is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        cleared = zero_in_l2_slice(bs, offset, nb_clusters, flags);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
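
/*
 * Summary of the zero_in_l2_slice() cases above: a plain zero cluster
 * (ZERO_PLAIN) is already optimal; an allocated zero cluster is kept if
 * the caller did not allow unmapping, so a later write can reuse the
 * allocation without COW; otherwise (and for compressed clusters) the
 * entry is replaced by a standalone QCOW_OFLAG_ZERO entry and the old
 * cluster is freed. Only ORing in the flag preserves the host offset.
 */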
/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * l1_entries and *visited_l1_entries are used to keep track of progress for
 * status_cb(). l1_entries contains the total number of L1 entries and
 * *visited_l1_entries counts all visited L1 entries.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, int64_t *visited_l1_entries,
                                      int64_t l1_entries,
                                      BlockDriverAmendStatusCB *status_cb,
                                      void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int ret;
    int i, j;

    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
    n_slices = s->cluster_size / slice_size2;

    if (!is_active_l1) {
        /* inactive L2 tables are not kept in the L2 table cache, so they
         * need their own buffer to be loaded into from disk */
        l2_slice = qemu_try_blockalign(bs->file->bs, slice_size2);
        if (l2_slice == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        uint64_t l2_refcount;

        if (!l2_offset) {
            /* unallocated */
            (*visited_l1_entries)++;
            if (status_cb) {
                status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
            }
            continue;
        }

        if (offset_into_cluster(s, l2_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                    PRIx64 " unaligned (L1 index: %#x)",
                                    l2_offset, i);
            ret = -EIO;
            goto fail;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &l2_refcount);
        if (ret < 0) {
            goto fail;
        }

        for (slice = 0; slice < n_slices; slice++) {
            uint64_t slice_offset = l2_offset + slice * slice_size2;
            bool l2_dirty = false;
            if (is_active_l1) {
                /* get active L2 tables from cache */
                ret = qcow2_cache_get(bs, s->l2_table_cache, slice_offset,
                                      (void **)&l2_slice);
            } else {
                /* load inactive L2 tables from disk */
                ret = bdrv_pread(bs->file, slice_offset, l2_slice, slice_size2);
            }
            if (ret < 0) {
                goto fail;
            }

            for (j = 0; j < s->l2_slice_size; j++) {
                uint64_t l2_entry = be64_to_cpu(l2_slice[j]);
                int64_t offset = l2_entry & L2E_OFFSET_MASK;
                QCow2ClusterType cluster_type =
                    qcow2_get_cluster_type(bs, l2_entry);

                if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN &&
                    cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) {
                    continue;
                }

                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    if (!bs->backing) {
                        /* not backed; therefore we can simply deallocate the
                         * cluster */
                        l2_slice[j] = 0;
                        l2_dirty = true;
                        continue;
                    }

                    offset = qcow2_alloc_clusters(bs, s->cluster_size);
                    if (offset < 0) {
                        ret = offset;
                        goto fail;
                    }

                    if (l2_refcount > 1) {
                        /* For shared L2 tables, set the refcount accordingly
                         * (it is already 1 and needs to be l2_refcount) */
                        ret = qcow2_update_cluster_refcount(
                                bs, offset >> s->cluster_bits,
                                refcount_diff(1, l2_refcount), false,
                                QCOW2_DISCARD_OTHER);
                        if (ret < 0) {
                            qcow2_free_clusters(bs, offset, s->cluster_size,
                                                QCOW2_DISCARD_OTHER);
                            goto fail;
                        }
                    }
                }

                if (offset_into_cluster(s, offset)) {
                    int l2_index = slice * s->l2_slice_size + j;
                    qcow2_signal_corruption(
                        bs, true, -1, -1,
                        "Cluster allocation offset "
                        "%#" PRIx64 " unaligned (L2 offset: %#"
                        PRIx64 ", L2 index: %#x)", offset,
                        l2_offset, l2_index);
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    ret = -EIO;
                    goto fail;
                }

                ret = qcow2_pre_write_overlap_check(bs, 0, offset,
                                                    s->cluster_size, true);
                if (ret < 0) {
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    goto fail;
                }

                ret = bdrv_pwrite_zeroes(s->data_file, offset,
                                         s->cluster_size, 0);
                if (ret < 0) {
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    goto fail;
                }

                if (l2_refcount == 1) {
                    l2_slice[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
                } else {
                    l2_slice[j] = cpu_to_be64(offset);
                }
                l2_dirty = true;
            }

            if (is_active_l1) {
                if (l2_dirty) {
                    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
                    qcow2_cache_depends_on_flush(s->l2_table_cache);
                }
                qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
            } else {
                if (l2_dirty) {
                    ret = qcow2_pre_write_overlap_check(
                        bs, QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2,
                        slice_offset, slice_size2, false);
                    if (ret < 0) {
                        goto fail;
                    }

                    ret = bdrv_pwrite(bs->file, slice_offset,
                                      l2_slice, slice_size2);
                    if (ret < 0) {
                        goto fail;
                    }
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
        }
    }

    ret = 0;

fail:
    if (l2_slice) {
        if (!is_active_l1) {
            qemu_vfree(l2_slice);
        } else {
            qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        }
    }
    return ret;
}
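
/*
 * Worked example (illustrative, not executed): how an L2 entry changes
 * when a zero cluster is expanded, assuming a freshly allocated host
 * cluster at the hypothetical offset 0x50000 and an L2 table with
 * refcount 1:
 *
 *     before: 0x0000000000000001   QCOW_OFLAG_ZERO set, no host cluster
 *     after:  0x8000000000050000   host offset | QCOW_OFLAG_COPIED
 *
 * With a shared L2 table (l2_refcount > 1) the COPIED flag is left
 * clear, since the new data cluster ends up with a refcount above 1.
 */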
/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs,
                               BlockDriverAmendStatusCB *status_cb,
                               void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table = NULL;
    int64_t l1_entries = 0, visited_l1_entries = 0;
    int ret;
    int i, j;

    if (status_cb) {
        l1_entries = s->l1_size;
        for (i = 0; i < s->nb_snapshots; i++) {
            l1_entries += s->snapshots[i].l1_size;
        }
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &visited_l1_entries, l1_entries,
                                     status_cb, cb_opaque);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us.
     */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_size2;
        uint64_t *new_l1_table;
        Error *local_err = NULL;

        ret = qcow2_validate_table(bs, s->snapshots[i].l1_table_offset,
                                   s->snapshots[i].l1_size, sizeof(uint64_t),
                                   QCOW_MAX_L1_SIZE, "Snapshot L1 table",
                                   &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            goto fail;
        }

        l1_size2 = s->snapshots[i].l1_size * sizeof(uint64_t);
        new_l1_table = g_try_realloc(l1_table, l1_size2);

        if (!new_l1_table) {
            ret = -ENOMEM;
            goto fail;
        }

        l1_table = new_l1_table;

        ret = bdrv_pread(bs->file, s->snapshots[i].l1_table_offset,
                         l1_table, l1_size2);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &visited_l1_entries, l1_entries,
                                         status_cb, cb_opaque);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(l1_table);
    return ret;
}
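
/*
 * Usage sketch (illustrative only): this is the kind of pass an
 * image-amend operation runs before downgrading to qcow2 version 2,
 * which lacks zero clusters.  example_print_progress and
 * example_downgrade_prepare are hypothetical helpers, not part of the
 * driver.
 *
 *     static void example_print_progress(BlockDriverState *bs,
 *                                        int64_t offset, int64_t total,
 *                                        void *opaque)
 *     {
 *         fprintf(stderr, "expanded %" PRId64 "/%" PRId64 " L1 entries\n",
 *                 offset, total);
 *     }
 *
 *     static int example_downgrade_prepare(BlockDriverState *bs)
 *     {
 *         // Walks the active L1 table and every snapshot L1 table,
 *         // reporting progress through the callback above.
 *         return qcow2_expand_zero_clusters(bs, example_print_progress,
 *                                           NULL);
 *     }
 */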