/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <zlib.h>

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "qemu/bswap.h"
#include "trace.h"

int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size, i, ret;

    if (exact_size >= s->l1_size) {
        return 0;
    }

    new_l1_size = exact_size;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
    ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset +
                                       new_l1_size * sizeof(uint64_t),
                             (s->l1_size - new_l1_size) * sizeof(uint64_t), 0);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_flush(bs->file->bs);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
    for (i = s->l1_size - 1; i > new_l1_size - 1; i--) {
        if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) {
            continue;
        }
        qcow2_free_clusters(bs, s->l1_table[i] & L1E_OFFSET_MASK,
                            s->cluster_size, QCOW2_DISCARD_ALWAYS);
        s->l1_table[i] = 0;
    }
    return 0;

fail:
    /*
     * If the write in the l1_table failed the image may contain a partially
     * overwritten l1_table. In this case it would be better to clear the
     * l1_table in memory to avoid possible image corruption.
     */
    memset(s->l1_table + new_l1_size, 0,
           (s->l1_size - new_l1_size) * sizeof(uint64_t));
    return ret;
}

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size) {
        return 0;
    }

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = DIV_ROUND_UP(new_l1_size * 3, 2);
        }
    }
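
    /*
     * Example: starting from an empty table (new_l1_size = 1) with
     * min_size = 5, the loop above yields 1 -> 2 -> 3 -> 5; each step
     * grows the table by roughly 3/2, so repeated small extensions of
     * the virtual disk cause only O(log n) L1 rewrites.
     */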

    QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
    if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs,
                                       ROUND_UP(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, ROUND_UP(new_l1_size2, 512));

    if (s->l1_size) {
        memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
    }

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    stl_be_p(data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * @bs: The BlockDriverState
 * @offset: A guest offset, used to calculate what slice of the L2
 *          table to load.
 * @l2_offset: Offset to the L2 table in the image file.
 * @l2_slice: Location to store the pointer to the L2 slice.
 *
 * Loads an L2 slice into memory (L2 slices are the parts of L2 tables
 * that are loaded by the qcow2 cache). If the slice is in the cache,
 * the cache is used; otherwise the L2 slice is loaded from the image
 * file.
 */
static int l2_load(BlockDriverState *bs, uint64_t offset,
                   uint64_t l2_offset, uint64_t **l2_slice)
{
    BDRVQcow2State *s = bs->opaque;
    int start_of_slice = sizeof(uint64_t) *
        (offset_to_l2_index(s, offset) - offset_to_l2_slice_index(s, offset));
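
    /*
     * Example (assuming 64 KiB clusters and 4 KiB cache entries): an L2
     * table holds 8192 entries and is split into 16 slices of 512 entries
     * each; for an entry with l2_index 1000, the index within its slice is
     * 488 and start_of_slice is 8 * 512 = 4096, i.e. the second 4 KiB
     * piece of the table.
     */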

    return qcow2_cache_get(bs, s->l2_table_cache, l2_offset + start_of_slice,
                           (void **)l2_slice);
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
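    /*
     * Example: L1_ENTRIES_PER_SECTOR is 64, so for l1_index 100 the mask
     * above rounds down to l1_start_index 64, and entries 64..127 are
     * rewritten together as one 512-byte sector.
     */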
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++)
    {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file,
                           s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated
 * one. Otherwise the new table is initialized with zeros.
 */
static int l2_allocate(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    /* If we're allocating the table at offset 0 then something is wrong */
    if (l2_offset == 0) {
        qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid "
                                "allocation of L2 table at offset 0");
        ret = -EIO;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
    n_slices = s->cluster_size / slice_size2;
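
    /*
     * With 64 KiB clusters and 4 KiB cache entries, for example, this is
     * 16 slices of 4096 bytes each; the loop below initializes the new L2
     * table one slice at a time, so only slice_size2 bytes need to be
     * resident in the cache at once.
     */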
    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    for (slice = 0; slice < n_slices; slice++) {
        ret = qcow2_cache_get_empty(bs, s->l2_table_cache,
                                    l2_offset + slice * slice_size2,
                                    (void **) &l2_slice);
        if (ret < 0) {
            goto fail;
        }

        if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
            /* if there was no old l2 table, clear the new slice */
            memset(l2_slice, 0, slice_size2);
        } else {
            uint64_t *old_slice;
            uint64_t old_l2_slice_offset =
                (old_l2_offset & L1E_OFFSET_MASK) + slice * slice_size2;

            /* if there was an old l2 table, read a slice from the disk */
            BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
            ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_slice_offset,
                                  (void **) &old_slice);
            if (ret < 0) {
                goto fail;
            }

            memcpy(l2_slice, old_slice, slice_size2);

            qcow2_cache_put(s->l2_table_cache, (void **) &old_slice);
        }

        /* write the l2 slice to the file */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

        trace_qcow2_l2_allocate_write_l2(bs, l1_index);
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }

    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_slice != NULL) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}

/*
 * Checks how many clusters in a given L2 slice are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster which may require different handling)
 */
static int count_contiguous_clusters(int nb_clusters, int cluster_size,
                                     uint64_t *l2_slice, uint64_t stop_flags)
{
    int i;
    QCow2ClusterType first_cluster_type;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_slice[0]);
    uint64_t offset = first_entry & mask;
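
    /*
     * Example: with 64 KiB clusters and stop_flags == QCOW_OFLAG_ZERO, two
     * neighbouring entries pointing to host offsets 0x50000 and 0x60000
     * with identical flags are contiguous (their masked values differ by
     * exactly one cluster_size), while a change in the zero or compressed
     * flag makes the comparison below fail and stops the count.
     */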

    if (!offset) {
        return 0;
    }

    /* must be allocated */
    first_cluster_type = qcow2_get_cluster_type(first_entry);
    assert(first_cluster_type == QCOW2_CLUSTER_NORMAL ||
           first_cluster_type == QCOW2_CLUSTER_ZERO_ALLOC);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_slice[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}

/*
 * Checks how many consecutive unallocated clusters in a given L2
 * slice have the same cluster type.
 */
static int count_contiguous_clusters_unallocated(int nb_clusters,
                                                 uint64_t *l2_slice,
                                                 QCow2ClusterType wanted_type)
{
    int i;

    assert(wanted_type == QCOW2_CLUSTER_ZERO_PLAIN ||
           wanted_type == QCOW2_CLUSTER_UNALLOCATED);
    for (i = 0; i < nb_clusters; i++) {
        uint64_t entry = be64_to_cpu(l2_slice[i]);
        QCow2ClusterType type = qcow2_get_cluster_type(entry);

        if (type != wanted_type) {
            break;
        }
    }

    return i;
}

static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
                                            uint64_t src_cluster_offset,
                                            unsigned offset_in_cluster,
                                            QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_preadv(bs, src_cluster_offset + offset_in_cluster,
                                  qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs,
                                                uint64_t src_cluster_offset,
                                                uint64_t cluster_offset,
                                                unsigned offset_in_cluster,
                                                uint8_t *buffer,
                                                unsigned bytes)
{
    if (bytes && bs->encrypted) {
        BDRVQcow2State *s = bs->opaque;
        int64_t offset = (s->crypt_physical_offset ?
                          (cluster_offset + offset_in_cluster) :
                          (src_cluster_offset + offset_in_cluster));
        assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0);
        assert((bytes & ~BDRV_SECTOR_MASK) == 0);
        assert(s->crypto);
        if (qcrypto_block_encrypt(s->crypto, offset, buffer, bytes, NULL) < 0) {
            return false;
        }
    }
    return true;
}

static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
                                             uint64_t cluster_offset,
                                             unsigned offset_in_cluster,
                                             QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + offset_in_cluster, qiov->size);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(bs->file, cluster_offset + offset_in_cluster,
                          qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}


/*
 * get_cluster_offset
 *
 * For a given offset of the virtual disk, find the cluster type and offset in
 * the qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *bytes is the maximum number of contiguous bytes starting at
 * offset that we are interested in.
 *
 * On exit, *bytes is the number of bytes starting at offset that have the same
 * cluster type and (if applicable) are stored contiguously in the image file.
 * Compressed clusters are always returned one by one.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             unsigned int *bytes, uint64_t *cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_slice;
    int c;
    unsigned int offset_in_cluster;
    uint64_t bytes_available, bytes_needed, nb_clusters;
    QCow2ClusterType type;
    int ret;

    offset_in_cluster = offset_into_cluster(s, offset);
    bytes_needed = (uint64_t) *bytes + offset_in_cluster;

    /* compute how many bytes there are between the start of the cluster
     * containing offset and the end of the l2 slice that contains
     * the entry pointing to it */
    bytes_available =
        ((uint64_t) (s->l2_slice_size - offset_to_l2_slice_index(s, offset)))
        << s->cluster_bits;
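
    /*
     * Example: with 64 KiB clusters and 512-entry slices, an offset whose
     * index within its slice is 510 leaves (512 - 510) << 16 = 128 KiB
     * until the end of the slice, so at most two clusters can be counted
     * in this call.
     */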

    if (bytes_needed > bytes_available) {
        bytes_needed = bytes_available;
    }

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 slice in memory */

    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);
    *cluster_offset = be64_to_cpu(l2_slice[l2_index]);

    nb_clusters = size_to_clusters(s, bytes_needed);
    /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
     * integers; the minimum cluster size is 512, so this assertion is always
     * true */
    assert(nb_clusters <= INT_MAX);

    type = qcow2_get_cluster_type(*cluster_offset);
    if (s->qcow_version < 3 && (type == QCOW2_CLUSTER_ZERO_PLAIN ||
                                type == QCOW2_CLUSTER_ZERO_ALLOC)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                " in pre-v3 image (L2 offset: %#" PRIx64
                                ", L2 index: %#x)", l2_offset, l2_index);
        ret = -EIO;
        goto fail;
    }
    switch (type) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO_PLAIN:
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_clusters_unallocated(nb_clusters,
                                                  &l2_slice[l2_index], type);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_ZERO_ALLOC:
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "Cluster allocation offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    bytes_available = (int64_t)c * s->cluster_size;

out:
    if (bytes_available > bytes_needed) {
        bytes_available = bytes_needed;
    }

    /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
     * subtracting offset_in_cluster will therefore definitely yield something
     * not exceeding UINT_MAX */
    assert(bytes_available - offset_in_cluster <= UINT_MAX);
    *bytes = bytes_available - offset_in_cluster;

    return type;

fail:
    qcow2_cache_put(s->l2_table_cache, (void **)&l2_slice);
    return ret;
}

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the appropriate slice of its l2 table.
 *
 * the cluster index in the l2 slice is given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_slice,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_slice = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    if (!(s->l1_table[l1_index] & QCOW_OFLAG_COPIED)) {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }

        /* Get the offset of the newly-allocated l2 table */
        l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
        assert(offset_into_cluster(s, l2_offset) == 0);
    }

    /* load the l2 slice in memory */
    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);

    *new_l2_slice = l2_slice;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful,
 * Return 0, otherwise.
 */
uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_slice;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_slice[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);
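
    /*
     * Example: if qcow2_alloc_bytes() returns host offset 0x1100 and
     * compressed_size is 0x500, the data spans sectors 8..10 and
     * nb_csectors is (0x15ff >> 9) - (0x1100 >> 9) = 10 - 8 = 2; the
     * reader adds 1 again, so three 512-byte sectors are read back.
     */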

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
    l2_slice[l2_index] = cpu_to_be64(cluster_offset);
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    return cluster_offset;
}

static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2COWRegion *start = &m->cow_start;
    Qcow2COWRegion *end = &m->cow_end;
    unsigned buffer_size;
    unsigned data_bytes = end->offset - (start->offset + start->nb_bytes);
    bool merge_reads;
    uint8_t *start_buffer, *end_buffer;
    QEMUIOVector qiov;
    int ret;

    assert(start->nb_bytes <= UINT_MAX - end->nb_bytes);
    assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes);
    assert(start->offset + start->nb_bytes <= end->offset);
    assert(!m->data_qiov || m->data_qiov->size == data_bytes);

    if (start->nb_bytes == 0 && end->nb_bytes == 0) {
        return 0;
    }

    /* If we have to read both the start and end COW regions and the
     * middle region is not too large then perform just one read
     * operation */
    merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384;
    if (merge_reads) {
        buffer_size = start->nb_bytes + data_bytes + end->nb_bytes;
    } else {
        /* If we have to do two reads, add some padding in the middle
         * if necessary to make sure that the end region is optimally
         * aligned. */
        size_t align = bdrv_opt_mem_align(bs);
        assert(align > 0 && align <= UINT_MAX);
        assert(QEMU_ALIGN_UP(start->nb_bytes, align) <=
               UINT_MAX - end->nb_bytes);
        buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes;
    }
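
    /*
     * Example: a 2 KiB COW head, 8 KiB of guest data and a 2 KiB COW tail
     * fit the merge_reads case, so a single 12 KiB buffer covers
     * [start | data | end]; with a larger middle region the buffer only
     * holds the two COW regions, and the end region starts at an offset
     * aligned to bdrv_opt_mem_align().
     */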

    /* Reserve a buffer large enough to store all the data that we're
     * going to read */
    start_buffer = qemu_try_blockalign(bs, buffer_size);
    if (start_buffer == NULL) {
        return -ENOMEM;
    }
    /* The part of the buffer where the end region is located */
    end_buffer = start_buffer + buffer_size - end->nb_bytes;

    qemu_iovec_init(&qiov, 2 + (m->data_qiov ? m->data_qiov->niov : 0));

    qemu_co_mutex_unlock(&s->lock);
    /* First we read the existing data from both COW regions. We
     * either read the whole region in one go, or the start and end
     * regions separately. */
    if (merge_reads) {
        qemu_iovec_add(&qiov, start_buffer, buffer_size);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
    } else {
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
    }
    if (ret < 0) {
        goto fail;
    }

    /* Encrypt the data if necessary before writing it */
    if (bs->encrypted) {
        if (!do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    start->offset, start_buffer,
                                    start->nb_bytes) ||
            !do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
                                    end->offset, end_buffer, end->nb_bytes)) {
            ret = -EIO;
            goto fail;
        }
    }

    /* And now we can write everything. If we have the guest data we
     * can write everything in one single operation */
    if (m->data_qiov) {
        qemu_iovec_reset(&qiov);
        if (start->nb_bytes) {
            qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        }
        qemu_iovec_concat(&qiov, m->data_qiov, 0, data_bytes);
        if (end->nb_bytes) {
            qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        }
        /* NOTE: we have a write_aio blkdebug event here followed by
         * a cow_write one in do_perform_cow_write(), but there's only
         * one single I/O operation */
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
    } else {
        /* If there's no guest data then write both COW regions separately */
        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
    }

fail:
    qemu_co_mutex_lock(&s->lock);

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (ret == 0) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qemu_vfree(start_buffer);
    qemu_iovec_destroy(&qiov);
    return ret;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_slice;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_slice, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);

    assert(l2_index + m->nb_clusters <= s->l2_slice_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes data
         * concurrently. The first one to complete updates the l2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by perform_cow()), update the l2 table with its cluster
         * pointer and free the old cluster. This is what this loop does */
        if (l2_slice[l2_index + i] != 0) {
            old_cluster[j++] = l2_slice[l2_index + i];
        }

        l2_slice[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }


    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (!m->keep_old_clusters && j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must copy from the backing file)
 */
static int count_cow_clusters(BDRVQcow2State *s, int nb_clusters,
                              uint64_t *l2_slice, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_slice[l2_index + i]);
        QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry);

        switch (cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO_PLAIN:
        case QCOW2_CLUSTER_ZERO_ALLOC:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0 if there was no dependency. *cur_bytes indicates the number of
 *     bytes from guest_offset that can be read before the next
 *     dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *     information on cluster allocation may be invalid now. The caller
 *     must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
                               uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);
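
        /*
         * Example: if this request covers bytes [0, 128K) while an
         * in-flight allocation covers [64K, 192K), the ranges intersect
         * and start < old_start, so the request is shortened to the
         * leading 64K; a request starting inside the old range is instead
         * forced to wait (bytes == 0 below).
         */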

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if an l2meta already exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}

/*
 * Checks how many already allocated clusters that don't require a copy on
 * write there are at the given guest_offset (up to *bytes). If
 * *host_offset is not zero, only physically contiguous clusters beginning at
 * this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0: if no allocated clusters are available at the given offset.
 *      *bytes is normally unchanged. It is set to 0 if the cluster
 *      is allocated and doesn't need COW, but doesn't have the right
 *      physical offset.
 *
 *   1: if allocated clusters that don't require a COW are available at
 *      the requested offset. *bytes may have decreased and describes
 *      the length of the area that can be written to.
 *
 *  -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
                         uint64_t *host_offset, uint64_t *bytes,
                         QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_slice;
    uint64_t nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 slice
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_slice_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_slice[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#llx unaligned (guest offset: %#" PRIx64
                                    ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
                                   uint64_t *host_offset, uint64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}

/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0: if no clusters could be allocated. *bytes is set to 0,
 *      *host_offset is left unchanged.
 *
 *   1: if new clusters were allocated. *bytes may be decreased if the
 *      new allocation doesn't cover all of the requested area.
 *      *host_offset is updated to contain the host offset of the first
 *      newly allocated cluster.
 *
 *  -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
                        uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t *l2_slice;
    uint64_t entry;
    uint64_t nb_clusters;
    int ret;
    bool keep_old_clusters = false;

    uint64_t alloc_cluster_offset = 0;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 slice
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_slice_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_slice[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_slice, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    if (qcow2_get_cluster_type(entry) == QCOW2_CLUSTER_ZERO_ALLOC &&
        (entry & QCOW_OFLAG_COPIED) &&
        (!*host_offset ||
         start_of_cluster(s, *host_offset) == (entry & L2E_OFFSET_MASK)))
    {
        int preallocated_nb_clusters;

        if (offset_into_cluster(s, entry & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Preallocated zero "
                                    "cluster offset %#llx unaligned (guest "
                                    "offset: %#" PRIx64 ")",
                                    entry & L2E_OFFSET_MASK, guest_offset);
            ret = -EIO;
            goto fail;
        }

        /* Try to reuse preallocated zero clusters; contiguous normal clusters
         * would be fine, too, but count_cow_clusters() above has limited
         * nb_clusters already to a range of COW clusters */
        preallocated_nb_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index], QCOW_OFLAG_COPIED);
        assert(preallocated_nb_clusters > 0);

        nb_clusters = preallocated_nb_clusters;
        alloc_cluster_offset = entry & L2E_OFFSET_MASK;

        /* We want to reuse these clusters, so qcow2_alloc_cluster_link_l2()
         * should not free them. */
        keep_old_clusters = true;
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    if (!alloc_cluster_offset) {
        /* Allocate, if necessary at a given offset in the image file */
        alloc_cluster_offset = start_of_cluster(s, *host_offset);
        ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                      &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        /* Can't extend contiguous allocation */
        if (nb_clusters == 0) {
            *bytes = 0;
            return 0;
        }

        /* !*host_offset would overwrite the image header and is reserved for
         * "no host offset preferred". If 0 was a valid host offset, it'd
         * trigger the following overlap check; do that now to avoid having an
         * invalid value in *host_offset. */
        if (!alloc_cluster_offset) {
            ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
                                                nb_clusters * s->cluster_size);
            assert(ret < 0);
            goto fail;
        }
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_bytes: Number of bytes from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_bytes: The number of bytes from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
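    /*
     * Example: with 64 KiB clusters, a 1 KiB write at guest offset
     * 0x10200 into a single newly allocated cluster gives
     * requested_bytes = 0x600, avail_bytes = 0x10000 and nb_bytes = 0x600;
     * cow_start then covers bytes [0, 0x200) of the cluster and cow_end
     * covers [0x600, 0x10000).
     */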
    uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
    int avail_bytes = MIN(INT_MAX, nb_clusters << s->cluster_bits);
    int nb_bytes = MIN(requested_bytes, avail_bytes);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,

        .keep_old_clusters = keep_old_clusters,

        .cow_start = {
            .offset     = 0,
            .nb_bytes   = offset_into_cluster(s, guest_offset),
        },
        .cow_end = {
            .offset     = nb_bytes,
            .nb_bytes   = avail_bytes - nb_bytes,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
                               unsigned int *bytes, uint64_t *host_offset,
                               QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes);

again:
    start = offset;
    remaining = *bytes;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start += cur_bytes;
        remaining -= cur_bytes;
        cluster_offset += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight
         *         allocation, set cluster_offset to write to the same cluster
         *         and set up the right synchronisation between the in-flight
         *         request and the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *bytes -= remaining;
    assert(*bytes > 0);
    assert(*host_offset != 0);

    return 0;
}

static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;
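
    /* Negative windowBits selects raw deflate data without the zlib header
     * and checksum; 12 is the window size (2^12 bytes) used when qcow2
     * compresses clusters. */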
    ret = inflateInit2(strm, -12);
    if (ret != Z_OK) {
        return -1;
    }
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
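
        /*
         * Example: for a descriptor with coffset 0x1100 and a stored sector
         * count of 2, nb_csectors is 3, sector_offset is 0x100 and csize is
         * 3 * 512 - 0x100 = 1280 bytes of compressed data to pass to
         * decompress_buffer().
         */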

        /* Allocate buffers on first decompress operation, most images are
         * uncompressed and the memory overhead can be avoided. The buffers
         * are freed in .bdrv_close().
         */
        if (!s->cluster_data) {
            /* one more sector for decompressed data alignment */
            s->cluster_data = qemu_try_blockalign(bs->file->bs,
                    QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size + 512);
            if (!s->cluster_data) {
                return -ENOMEM;
            }
        }
        if (!s->cluster_cache) {
            s->cluster_cache = g_malloc(s->cluster_size);
        }

        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data,
                        nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}

/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 slice) and returns the number of discarded
 * clusters.
 */
static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset,
                               uint64_t nb_clusters,
                               enum qcow2_discard_type type, bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_slice;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 slice */
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry;

        old_l2_entry = be64_to_cpu(l2_slice[l2_index + i]);

        /*
         * If full_discard is false, make sure that a discarded area reads back
         * as zeroes for v3 images (we cannot do it for v2 without actually
         * writing a zero-filled buffer). We can skip the operation if the
         * cluster is already marked as zero, or if it's unallocated and we
         * don't have a backing file.
         *
         * TODO We might want to use bdrv_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         *
         * If full_discard is true, the sector should not read back as zeroes,
         * but rather fall through to the backing file.
         */
        switch (qcow2_get_cluster_type(old_l2_entry)) {
        case QCOW2_CLUSTER_UNALLOCATED:
            if (full_discard || !bs->backing) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_PLAIN:
            if (!full_discard) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_ALLOC:
        case QCOW2_CLUSTER_NORMAL:
        case QCOW2_CLUSTER_COMPRESSED:
            break;

        default:
            abort();
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        if (!full_discard && s->qcow_version >= 3) {
            l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
        } else {
            l2_slice[l2_index + i] = cpu_to_be64(0);
        }

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    return nb_clusters;
}

int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, enum qcow2_discard_type type,
                          bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    /* Each L2 slice is handled by its own loop iteration */
    while (nb_clusters > 0) {
        cleared = discard_in_l2_slice(bs, offset, nb_clusters, type,
                                      full_discard);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 slice) and returns the number of zeroed
 * clusters.
 */
static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
                            uint64_t nb_clusters, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_slice;
    int l2_index;
    int ret;
    int i;
    bool unmap = !!(flags & BDRV_REQ_MAY_UNMAP);
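
    /* With BDRV_REQ_MAY_UNMAP, the loop below frees the host clusters and
     * replaces the entries with plain zero clusters; without it, allocated
     * normal clusters keep their host storage and only gain the zero flag
     * (compressed clusters are always freed). */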

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 slice */
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;
        QCow2ClusterType cluster_type;

        old_offset = be64_to_cpu(l2_slice[l2_index + i]);

        /*
         * Minimize L2 changes if the cluster already reads back as
         * zeroes with correct allocation.
         */
        cluster_type = qcow2_get_cluster_type(old_offset);
        if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN ||
            (cluster_type == QCOW2_CLUSTER_ZERO_ALLOC && !unmap)) {
            continue;
        }

        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        if (cluster_type == QCOW2_CLUSTER_COMPRESSED || unmap) {
            l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_slice[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    return nb_clusters;
}

int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 slice is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        cleared = zero_in_l2_slice(bs, offset, nb_clusters, flags);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * l1_entries and *visited_l1_entries are used to keep track of progress for
 * status_cb(). l1_entries contains the total number of L1 entries and
 * *visited_l1_entries counts all visited L1 entries.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, int64_t *visited_l1_entries,
                                      int64_t l1_entries,
                                      BlockDriverAmendStatusCB *status_cb,
                                      void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int ret;
    int i, j;

    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
    n_slices = s->cluster_size / slice_size2;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_slice = qemu_try_blockalign(bs->file->bs, slice_size2);
        if (l2_slice == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        uint64_t l2_refcount;

        if (!l2_offset) {
            /* unallocated */
            (*visited_l1_entries)++;
            if (status_cb) {
                status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
            }
            continue;
        }

        if (offset_into_cluster(s, l2_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                    PRIx64 " unaligned (L1 index: %#x)",
                                    l2_offset, i);
            ret = -EIO;
            goto fail;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &l2_refcount);
        if (ret < 0) {
            goto fail;
        }

        for (slice = 0; slice < n_slices; slice++) {
            uint64_t slice_offset = l2_offset + slice * slice_size2;
            bool l2_dirty = false;
            if (is_active_l1) {
                /* get active L2 tables from cache */
                ret = qcow2_cache_get(bs, s->l2_table_cache, slice_offset,
                                      (void **)&l2_slice);
            } else {
                /* load inactive L2 tables from disk */
                ret = bdrv_pread(bs->file, slice_offset, l2_slice, slice_size2);
            }
            if (ret < 0) {
                goto fail;
            }

            for (j = 0; j < s->l2_slice_size; j++) {
                uint64_t l2_entry = be64_to_cpu(l2_slice[j]);
                int64_t offset = l2_entry & L2E_OFFSET_MASK;
                QCow2ClusterType cluster_type =
                    qcow2_get_cluster_type(l2_entry);

                if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN &&
                    cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) {
                    continue;
                }

                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    if (!bs->backing) {
                        /* not backed; therefore we can simply deallocate the
                         * cluster */
                        l2_slice[j] = 0;
                        l2_dirty = true;
                        continue;
                    }

                    offset = qcow2_alloc_clusters(bs, s->cluster_size);
                    if (offset < 0) {
                        ret = offset;
                        goto fail;
                    }

                    if (l2_refcount > 1) {
                        /* For shared L2 tables, set the refcount accordingly
                         * (it is already 1 and needs to be l2_refcount) */
                        ret = qcow2_update_cluster_refcount(
                                bs, offset >> s->cluster_bits,
                                refcount_diff(1, l2_refcount), false,
                                QCOW2_DISCARD_OTHER);
                        if (ret < 0) {
                            qcow2_free_clusters(bs, offset, s->cluster_size,
                                                QCOW2_DISCARD_OTHER);
                            goto fail;
                        }
                    }
                }

                if (offset_into_cluster(s, offset)) {
                    int l2_index = slice * s->l2_slice_size + j;
                    qcow2_signal_corruption(
                        bs, true, -1, -1,
                        "Cluster allocation offset "
                        "%#" PRIx64 " unaligned (L2 offset: %#"
                        PRIx64 ", L2 index: %#x)", offset,
                        l2_offset, l2_index);
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    ret = -EIO;
                    goto fail;
                }

                ret = qcow2_pre_write_overlap_check(bs, 0, offset,
                                                    s->cluster_size);
                if (ret < 0) {
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    goto fail;
                }

                ret = bdrv_pwrite_zeroes(bs->file, offset, s->cluster_size, 0);
                if (ret < 0) {
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    goto fail;
                }

                if (l2_refcount == 1) {
                    l2_slice[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
                } else {
                    l2_slice[j] = cpu_to_be64(offset);
                }
                l2_dirty = true;
            }

            if (is_active_l1) {
                if (l2_dirty) {
                    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
                    qcow2_cache_depends_on_flush(s->l2_table_cache);
                }
                qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
            } else {
                if (l2_dirty) {
                    ret = qcow2_pre_write_overlap_check(
                        bs, QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2,
                        slice_offset, slice_size2);
                    if (ret < 0) {
                        goto fail;
                    }

                    ret = bdrv_pwrite(bs->file, slice_offset,
                                      l2_slice, slice_size2);
                    if (ret < 0) {
                        goto fail;
                    }
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
        }
    }
/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * l1_entries and *visited_l1_entries are used to keep track of progress for
 * status_cb(). l1_entries contains the total number of L1 entries and
 * *visited_l1_entries counts all visited L1 entries.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, int64_t *visited_l1_entries,
                                      int64_t l1_entries,
                                      BlockDriverAmendStatusCB *status_cb,
                                      void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int ret;
    int i, j;

    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
    n_slices = s->cluster_size / slice_size2;

    if (!is_active_l1) {
        /* Inactive L2 tables are not kept in the L2 table cache, so we
         * need a buffer of our own to read them from disk */
        l2_slice = qemu_try_blockalign(bs->file->bs, slice_size2);
        if (l2_slice == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        uint64_t l2_refcount;

        if (!l2_offset) {
            /* unallocated */
            (*visited_l1_entries)++;
            if (status_cb) {
                status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
            }
            continue;
        }

        if (offset_into_cluster(s, l2_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                    PRIx64 " unaligned (L1 index: %#x)",
                                    l2_offset, i);
            ret = -EIO;
            goto fail;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &l2_refcount);
        if (ret < 0) {
            goto fail;
        }
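        /*
         * Each L2 table is processed in slices of s->l2_slice_size
         * entries. Worked example with illustrative numbers (the real
         * slice size follows the configured L2 cache entry size): with
         * 64 KiB clusters an L2 table holds 65536 / 8 = 8192 entries;
         * if a slice holds 512 entries (slice_size2 == 4096 bytes),
         * the loop below runs n_slices == 16 times per L2 table.
         */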
        for (slice = 0; slice < n_slices; slice++) {
            uint64_t slice_offset = l2_offset + slice * slice_size2;
            bool l2_dirty = false;
            if (is_active_l1) {
                /* get active L2 tables from cache */
                ret = qcow2_cache_get(bs, s->l2_table_cache, slice_offset,
                                      (void **)&l2_slice);
            } else {
                /* load inactive L2 tables from disk */
                ret = bdrv_pread(bs->file, slice_offset, l2_slice, slice_size2);
            }
            if (ret < 0) {
                goto fail;
            }

            for (j = 0; j < s->l2_slice_size; j++) {
                uint64_t l2_entry = be64_to_cpu(l2_slice[j]);
                int64_t offset = l2_entry & L2E_OFFSET_MASK;
                QCow2ClusterType cluster_type =
                    qcow2_get_cluster_type(l2_entry);

                if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN &&
                    cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) {
                    continue;
                }

                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    if (!bs->backing) {
                        /* not backed; therefore we can simply deallocate the
                         * cluster */
                        l2_slice[j] = 0;
                        l2_dirty = true;
                        continue;
                    }

                    offset = qcow2_alloc_clusters(bs, s->cluster_size);
                    if (offset < 0) {
                        ret = offset;
                        goto fail;
                    }

                    if (l2_refcount > 1) {
                        /* For shared L2 tables, set the refcount accordingly
                         * (it is already 1 and needs to be l2_refcount) */
                        ret = qcow2_update_cluster_refcount(
                                bs, offset >> s->cluster_bits,
                                refcount_diff(1, l2_refcount), false,
                                QCOW2_DISCARD_OTHER);
                        if (ret < 0) {
                            qcow2_free_clusters(bs, offset, s->cluster_size,
                                                QCOW2_DISCARD_OTHER);
                            goto fail;
                        }
                    }
                }

                if (offset_into_cluster(s, offset)) {
                    int l2_index = slice * s->l2_slice_size + j;
                    qcow2_signal_corruption(
                        bs, true, -1, -1,
                        "Cluster allocation offset "
                        "%#" PRIx64 " unaligned (L2 offset: %#"
                        PRIx64 ", L2 index: %#x)", offset,
                        l2_offset, l2_index);
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    ret = -EIO;
                    goto fail;
                }

                ret = qcow2_pre_write_overlap_check(bs, 0, offset,
                                                    s->cluster_size);
                if (ret < 0) {
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    goto fail;
                }

                ret = bdrv_pwrite_zeroes(bs->file, offset, s->cluster_size, 0);
                if (ret < 0) {
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    goto fail;
                }

                if (l2_refcount == 1) {
                    l2_slice[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
                } else {
                    l2_slice[j] = cpu_to_be64(offset);
                }
                l2_dirty = true;
            }

            if (is_active_l1) {
                if (l2_dirty) {
                    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
                    qcow2_cache_depends_on_flush(s->l2_table_cache);
                }
                qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
            } else {
                if (l2_dirty) {
                    ret = qcow2_pre_write_overlap_check(
                        bs, QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2,
                        slice_offset, slice_size2);
                    if (ret < 0) {
                        goto fail;
                    }

                    ret = bdrv_pwrite(bs->file, slice_offset,
                                      l2_slice, slice_size2);
                    if (ret < 0) {
                        goto fail;
                    }
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
        }
    }

    ret = 0;

fail:
    if (l2_slice) {
        if (!is_active_l1) {
            qemu_vfree(l2_slice);
        } else {
            qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        }
    }
    return ret;
}
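/*
 * Net effect of the expansion on an L2 entry, shown with illustrative
 * values (assuming QCOW_OFLAG_ZERO == 1ULL << 0 and
 * QCOW_OFLAG_COPIED == 1ULL << 63):
 *
 *     before (zero plain, backed image):    0x0000000000000001
 *     after, cluster allocated at 0x70000,
 *       with l2_refcount == 1:              0x8000000000070000
 *       with l2_refcount > 1:               0x0000000000070000
 *
 * Without a backing file, the zero-plain entry is instead simply
 * cleared to 0 (unallocated).
 */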
/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs,
                               BlockDriverAmendStatusCB *status_cb,
                               void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table = NULL;
    int64_t l1_entries = 0, visited_l1_entries = 0;
    int ret;
    int i, j;

    if (status_cb) {
        l1_entries = s->l1_size;
        for (i = 0; i < s->nb_snapshots; i++) {
            l1_entries += s->snapshots[i].l1_size;
        }
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &visited_l1_entries, l1_entries,
                                     status_cb, cb_opaque);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables, so the L2 table
     * cache has to be flushed before the L2 tables referenced by inactive
     * L1 entries are accessed (otherwise we might try to expand zero
     * clusters that have already been expanded). The cache also has to be
     * emptied, since it may contain tables which are now going to be
     * modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_sectors = DIV_ROUND_UP(s->snapshots[i].l1_size *
                                      sizeof(uint64_t), BDRV_SECTOR_SIZE);

        uint64_t *new_l1_table =
            g_try_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);

        if (!new_l1_table) {
            ret = -ENOMEM;
            goto fail;
        }

        l1_table = new_l1_table;

        ret = bdrv_read(bs->file,
                        s->snapshots[i].l1_table_offset / BDRV_SECTOR_SIZE,
                        (void *)l1_table, l1_sectors);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &visited_l1_entries, l1_entries,
                                         status_cb, cb_opaque);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(l1_table);
    return ret;
}
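/*
 * Illustrative sketch, not part of the original driver: a minimal
 * BlockDriverAmendStatusCB that a caller (e.g. an amend/downgrade path)
 * might pass to qcow2_expand_zero_clusters(). The example callback and
 * its stderr output are hypothetical; real callers report progress
 * through their own machinery.
 *
 *     static void example_status_cb(BlockDriverState *bs, int64_t offset,
 *                                   int64_t total, void *opaque)
 *     {
 *         fprintf(stderr, "expanded %" PRId64 "/%" PRId64 " L1 entries\n",
 *                 offset, total);
 *     }
 *
 *     ret = qcow2_expand_zero_clusters(bs, example_status_cb, NULL);
 *     if (ret < 0) {
 *         // expansion failed; the image must not be downgraded
 *     }
 */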