1 /* 2 * Block driver for the QCOW version 2 format 3 * 4 * Copyright (c) 2004-2006 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25 #include "qemu/osdep.h" 26 #include <zlib.h> 27 28 #include "qapi/error.h" 29 #include "qemu-common.h" 30 #include "block/block_int.h" 31 #include "block/qcow2.h" 32 #include "qemu/bswap.h" 33 #include "trace.h" 34 35 int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size) 36 { 37 BDRVQcow2State *s = bs->opaque; 38 int new_l1_size, i, ret; 39 40 if (exact_size >= s->l1_size) { 41 return 0; 42 } 43 44 new_l1_size = exact_size; 45 46 #ifdef DEBUG_ALLOC2 47 fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size); 48 #endif 49 50 BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE); 51 ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset + 52 new_l1_size * sizeof(uint64_t), 53 (s->l1_size - new_l1_size) * sizeof(uint64_t), 0); 54 if (ret < 0) { 55 goto fail; 56 } 57 58 ret = bdrv_flush(bs->file->bs); 59 if (ret < 0) { 60 goto fail; 61 } 62 63 BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS); 64 for (i = s->l1_size - 1; i > new_l1_size - 1; i--) { 65 if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) { 66 continue; 67 } 68 qcow2_free_clusters(bs, s->l1_table[i] & L1E_OFFSET_MASK, 69 s->cluster_size, QCOW2_DISCARD_ALWAYS); 70 s->l1_table[i] = 0; 71 } 72 return 0; 73 74 fail: 75 /* 76 * If the write in the l1_table failed the image may contain a partially 77 * overwritten l1_table. In this case it would be better to clear the 78 * l1_table in memory to avoid possible image corruption. 79 */ 80 memset(s->l1_table + new_l1_size, 0, 81 (s->l1_size - new_l1_size) * sizeof(uint64_t)); 82 return ret; 83 } 84 85 int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size, 86 bool exact_size) 87 { 88 BDRVQcow2State *s = bs->opaque; 89 int new_l1_size2, ret, i; 90 uint64_t *new_l1_table; 91 int64_t old_l1_table_offset, old_l1_size; 92 int64_t new_l1_table_offset, new_l1_size; 93 uint8_t data[12]; 94 95 if (min_size <= s->l1_size) 96 return 0; 97 98 /* Do a sanity check on min_size before trying to calculate new_l1_size 99 * (this prevents overflows during the while loop for the calculation of 100 * new_l1_size) */ 101 if (min_size > INT_MAX / sizeof(uint64_t)) { 102 return -EFBIG; 103 } 104 105 if (exact_size) { 106 new_l1_size = min_size; 107 } else { 108 /* Bump size up to reduce the number of times we have to grow */ 109 new_l1_size = s->l1_size; 110 if (new_l1_size == 0) { 111 new_l1_size = 1; 112 } 113 while (min_size > new_l1_size) { 114 new_l1_size = DIV_ROUND_UP(new_l1_size * 3, 2); 115 } 116 } 117 118 QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX); 119 if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) { 120 return -EFBIG; 121 } 122 123 #ifdef DEBUG_ALLOC2 124 fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n", 125 s->l1_size, new_l1_size); 126 #endif 127 128 new_l1_size2 = sizeof(uint64_t) * new_l1_size; 129 new_l1_table = qemu_try_blockalign(bs->file->bs, 130 ROUND_UP(new_l1_size2, 512)); 131 if (new_l1_table == NULL) { 132 return -ENOMEM; 133 } 134 memset(new_l1_table, 0, ROUND_UP(new_l1_size2, 512)); 135 136 if (s->l1_size) { 137 memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t)); 138 } 139 140 /* write new table (align to cluster) */ 141 BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE); 142 new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2); 143 if (new_l1_table_offset < 0) { 144 qemu_vfree(new_l1_table); 145 return new_l1_table_offset; 146 } 147 148 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 149 if (ret < 0) { 150 goto fail; 151 } 152 153 /* the L1 position has not yet been updated, so these clusters must 154 * indeed be completely free */ 155 ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset, 156 new_l1_size2); 157 if (ret < 0) { 158 goto fail; 159 } 160 161 BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE); 162 for(i = 0; i < s->l1_size; i++) 163 new_l1_table[i] = cpu_to_be64(new_l1_table[i]); 164 ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, 165 new_l1_table, new_l1_size2); 166 if (ret < 0) 167 goto fail; 168 for(i = 0; i < s->l1_size; i++) 169 new_l1_table[i] = be64_to_cpu(new_l1_table[i]); 170 171 /* set new table */ 172 BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE); 173 stl_be_p(data, new_l1_size); 174 stq_be_p(data + 4, new_l1_table_offset); 175 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size), 176 data, sizeof(data)); 177 if (ret < 0) { 178 goto fail; 179 } 180 qemu_vfree(s->l1_table); 181 old_l1_table_offset = s->l1_table_offset; 182 s->l1_table_offset = new_l1_table_offset; 183 s->l1_table = new_l1_table; 184 old_l1_size = s->l1_size; 185 s->l1_size = new_l1_size; 186 qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t), 187 QCOW2_DISCARD_OTHER); 188 return 0; 189 fail: 190 qemu_vfree(new_l1_table); 191 qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2, 192 QCOW2_DISCARD_OTHER); 193 return ret; 194 } 195 196 /* 197 * l2_load 198 * 199 * @bs: The BlockDriverState 200 * @offset: A guest offset, used to calculate what slice of the L2 201 * table to load. 202 * @l2_offset: Offset to the L2 table in the image file. 203 * @l2_slice: Location to store the pointer to the L2 slice. 204 * 205 * Loads a L2 slice into memory (L2 slices are the parts of L2 tables 206 * that are loaded by the qcow2 cache). If the slice is in the cache, 207 * the cache is used; otherwise the L2 slice is loaded from the image 208 * file. 209 */ 210 static int l2_load(BlockDriverState *bs, uint64_t offset, 211 uint64_t l2_offset, uint64_t **l2_slice) 212 { 213 BDRVQcow2State *s = bs->opaque; 214 int start_of_slice = sizeof(uint64_t) * 215 (offset_to_l2_index(s, offset) - offset_to_l2_slice_index(s, offset)); 216 217 return qcow2_cache_get(bs, s->l2_table_cache, l2_offset + start_of_slice, 218 (void **)l2_slice); 219 } 220 221 /* 222 * Writes one sector of the L1 table to the disk (can't update single entries 223 * and we really don't want bdrv_pread to perform a read-modify-write) 224 */ 225 #define L1_ENTRIES_PER_SECTOR (512 / 8) 226 int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index) 227 { 228 BDRVQcow2State *s = bs->opaque; 229 uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 }; 230 int l1_start_index; 231 int i, ret; 232 233 l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1); 234 for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size; 235 i++) 236 { 237 buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]); 238 } 239 240 ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1, 241 s->l1_table_offset + 8 * l1_start_index, sizeof(buf)); 242 if (ret < 0) { 243 return ret; 244 } 245 246 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 247 ret = bdrv_pwrite_sync(bs->file, 248 s->l1_table_offset + 8 * l1_start_index, 249 buf, sizeof(buf)); 250 if (ret < 0) { 251 return ret; 252 } 253 254 return 0; 255 } 256 257 /* 258 * l2_allocate 259 * 260 * Allocate a new l2 entry in the file. If l1_index points to an already 261 * used entry in the L2 table (i.e. we are doing a copy on write for the L2 262 * table) copy the contents of the old L2 table into the newly allocated one. 263 * Otherwise the new table is initialized with zeros. 264 * 265 */ 266 267 static int l2_allocate(BlockDriverState *bs, int l1_index) 268 { 269 BDRVQcow2State *s = bs->opaque; 270 uint64_t old_l2_offset; 271 uint64_t *l2_slice = NULL; 272 unsigned slice, slice_size2, n_slices; 273 int64_t l2_offset; 274 int ret; 275 276 old_l2_offset = s->l1_table[l1_index]; 277 278 trace_qcow2_l2_allocate(bs, l1_index); 279 280 /* allocate a new l2 entry */ 281 282 l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t)); 283 if (l2_offset < 0) { 284 ret = l2_offset; 285 goto fail; 286 } 287 288 /* If we're allocating the table at offset 0 then something is wrong */ 289 if (l2_offset == 0) { 290 qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid " 291 "allocation of L2 table at offset 0"); 292 ret = -EIO; 293 goto fail; 294 } 295 296 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 297 if (ret < 0) { 298 goto fail; 299 } 300 301 /* allocate a new entry in the l2 cache */ 302 303 slice_size2 = s->l2_slice_size * sizeof(uint64_t); 304 n_slices = s->cluster_size / slice_size2; 305 306 trace_qcow2_l2_allocate_get_empty(bs, l1_index); 307 for (slice = 0; slice < n_slices; slice++) { 308 ret = qcow2_cache_get_empty(bs, s->l2_table_cache, 309 l2_offset + slice * slice_size2, 310 (void **) &l2_slice); 311 if (ret < 0) { 312 goto fail; 313 } 314 315 if ((old_l2_offset & L1E_OFFSET_MASK) == 0) { 316 /* if there was no old l2 table, clear the new slice */ 317 memset(l2_slice, 0, slice_size2); 318 } else { 319 uint64_t *old_slice; 320 uint64_t old_l2_slice_offset = 321 (old_l2_offset & L1E_OFFSET_MASK) + slice * slice_size2; 322 323 /* if there was an old l2 table, read a slice from the disk */ 324 BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ); 325 ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_slice_offset, 326 (void **) &old_slice); 327 if (ret < 0) { 328 goto fail; 329 } 330 331 memcpy(l2_slice, old_slice, slice_size2); 332 333 qcow2_cache_put(s->l2_table_cache, (void **) &old_slice); 334 } 335 336 /* write the l2 slice to the file */ 337 BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE); 338 339 trace_qcow2_l2_allocate_write_l2(bs, l1_index); 340 qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice); 341 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 342 } 343 344 ret = qcow2_cache_flush(bs, s->l2_table_cache); 345 if (ret < 0) { 346 goto fail; 347 } 348 349 /* update the L1 entry */ 350 trace_qcow2_l2_allocate_write_l1(bs, l1_index); 351 s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED; 352 ret = qcow2_write_l1_entry(bs, l1_index); 353 if (ret < 0) { 354 goto fail; 355 } 356 357 trace_qcow2_l2_allocate_done(bs, l1_index, 0); 358 return 0; 359 360 fail: 361 trace_qcow2_l2_allocate_done(bs, l1_index, ret); 362 if (l2_slice != NULL) { 363 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 364 } 365 s->l1_table[l1_index] = old_l2_offset; 366 if (l2_offset > 0) { 367 qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t), 368 QCOW2_DISCARD_ALWAYS); 369 } 370 return ret; 371 } 372 373 /* 374 * Checks how many clusters in a given L2 slice are contiguous in the image 375 * file. As soon as one of the flags in the bitmask stop_flags changes compared 376 * to the first cluster, the search is stopped and the cluster is not counted 377 * as contiguous. (This allows it, for example, to stop at the first compressed 378 * cluster which may require a different handling) 379 */ 380 static int count_contiguous_clusters(int nb_clusters, int cluster_size, 381 uint64_t *l2_slice, uint64_t stop_flags) 382 { 383 int i; 384 QCow2ClusterType first_cluster_type; 385 uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED; 386 uint64_t first_entry = be64_to_cpu(l2_slice[0]); 387 uint64_t offset = first_entry & mask; 388 389 if (!offset) { 390 return 0; 391 } 392 393 /* must be allocated */ 394 first_cluster_type = qcow2_get_cluster_type(first_entry); 395 assert(first_cluster_type == QCOW2_CLUSTER_NORMAL || 396 first_cluster_type == QCOW2_CLUSTER_ZERO_ALLOC); 397 398 for (i = 0; i < nb_clusters; i++) { 399 uint64_t l2_entry = be64_to_cpu(l2_slice[i]) & mask; 400 if (offset + (uint64_t) i * cluster_size != l2_entry) { 401 break; 402 } 403 } 404 405 return i; 406 } 407 408 /* 409 * Checks how many consecutive unallocated clusters in a given L2 410 * slice have the same cluster type. 411 */ 412 static int count_contiguous_clusters_unallocated(int nb_clusters, 413 uint64_t *l2_slice, 414 QCow2ClusterType wanted_type) 415 { 416 int i; 417 418 assert(wanted_type == QCOW2_CLUSTER_ZERO_PLAIN || 419 wanted_type == QCOW2_CLUSTER_UNALLOCATED); 420 for (i = 0; i < nb_clusters; i++) { 421 uint64_t entry = be64_to_cpu(l2_slice[i]); 422 QCow2ClusterType type = qcow2_get_cluster_type(entry); 423 424 if (type != wanted_type) { 425 break; 426 } 427 } 428 429 return i; 430 } 431 432 static int coroutine_fn do_perform_cow_read(BlockDriverState *bs, 433 uint64_t src_cluster_offset, 434 unsigned offset_in_cluster, 435 QEMUIOVector *qiov) 436 { 437 int ret; 438 439 if (qiov->size == 0) { 440 return 0; 441 } 442 443 BLKDBG_EVENT(bs->file, BLKDBG_COW_READ); 444 445 if (!bs->drv) { 446 return -ENOMEDIUM; 447 } 448 449 /* Call .bdrv_co_readv() directly instead of using the public block-layer 450 * interface. This avoids double I/O throttling and request tracking, 451 * which can lead to deadlock when block layer copy-on-read is enabled. 452 */ 453 ret = bs->drv->bdrv_co_preadv(bs, src_cluster_offset + offset_in_cluster, 454 qiov->size, qiov, 0); 455 if (ret < 0) { 456 return ret; 457 } 458 459 return 0; 460 } 461 462 static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs, 463 uint64_t src_cluster_offset, 464 uint64_t cluster_offset, 465 unsigned offset_in_cluster, 466 uint8_t *buffer, 467 unsigned bytes) 468 { 469 if (bytes && bs->encrypted) { 470 BDRVQcow2State *s = bs->opaque; 471 int64_t offset = (s->crypt_physical_offset ? 472 (cluster_offset + offset_in_cluster) : 473 (src_cluster_offset + offset_in_cluster)); 474 assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0); 475 assert((bytes & ~BDRV_SECTOR_MASK) == 0); 476 assert(s->crypto); 477 if (qcrypto_block_encrypt(s->crypto, offset, buffer, bytes, NULL) < 0) { 478 return false; 479 } 480 } 481 return true; 482 } 483 484 static int coroutine_fn do_perform_cow_write(BlockDriverState *bs, 485 uint64_t cluster_offset, 486 unsigned offset_in_cluster, 487 QEMUIOVector *qiov) 488 { 489 int ret; 490 491 if (qiov->size == 0) { 492 return 0; 493 } 494 495 ret = qcow2_pre_write_overlap_check(bs, 0, 496 cluster_offset + offset_in_cluster, qiov->size); 497 if (ret < 0) { 498 return ret; 499 } 500 501 BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE); 502 ret = bdrv_co_pwritev(bs->file, cluster_offset + offset_in_cluster, 503 qiov->size, qiov, 0); 504 if (ret < 0) { 505 return ret; 506 } 507 508 return 0; 509 } 510 511 512 /* 513 * get_cluster_offset 514 * 515 * For a given offset of the virtual disk, find the cluster type and offset in 516 * the qcow2 file. The offset is stored in *cluster_offset. 517 * 518 * On entry, *bytes is the maximum number of contiguous bytes starting at 519 * offset that we are interested in. 520 * 521 * On exit, *bytes is the number of bytes starting at offset that have the same 522 * cluster type and (if applicable) are stored contiguously in the image file. 523 * Compressed clusters are always returned one by one. 524 * 525 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error 526 * cases. 527 */ 528 int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset, 529 unsigned int *bytes, uint64_t *cluster_offset) 530 { 531 BDRVQcow2State *s = bs->opaque; 532 unsigned int l2_index; 533 uint64_t l1_index, l2_offset, *l2_slice; 534 int c; 535 unsigned int offset_in_cluster; 536 uint64_t bytes_available, bytes_needed, nb_clusters; 537 QCow2ClusterType type; 538 int ret; 539 540 offset_in_cluster = offset_into_cluster(s, offset); 541 bytes_needed = (uint64_t) *bytes + offset_in_cluster; 542 543 /* compute how many bytes there are between the start of the cluster 544 * containing offset and the end of the l2 slice that contains 545 * the entry pointing to it */ 546 bytes_available = 547 ((uint64_t) (s->l2_slice_size - offset_to_l2_slice_index(s, offset))) 548 << s->cluster_bits; 549 550 if (bytes_needed > bytes_available) { 551 bytes_needed = bytes_available; 552 } 553 554 *cluster_offset = 0; 555 556 /* seek to the l2 offset in the l1 table */ 557 558 l1_index = offset_to_l1_index(s, offset); 559 if (l1_index >= s->l1_size) { 560 type = QCOW2_CLUSTER_UNALLOCATED; 561 goto out; 562 } 563 564 l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK; 565 if (!l2_offset) { 566 type = QCOW2_CLUSTER_UNALLOCATED; 567 goto out; 568 } 569 570 if (offset_into_cluster(s, l2_offset)) { 571 qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64 572 " unaligned (L1 index: %#" PRIx64 ")", 573 l2_offset, l1_index); 574 return -EIO; 575 } 576 577 /* load the l2 slice in memory */ 578 579 ret = l2_load(bs, offset, l2_offset, &l2_slice); 580 if (ret < 0) { 581 return ret; 582 } 583 584 /* find the cluster offset for the given disk offset */ 585 586 l2_index = offset_to_l2_slice_index(s, offset); 587 *cluster_offset = be64_to_cpu(l2_slice[l2_index]); 588 589 nb_clusters = size_to_clusters(s, bytes_needed); 590 /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned 591 * integers; the minimum cluster size is 512, so this assertion is always 592 * true */ 593 assert(nb_clusters <= INT_MAX); 594 595 type = qcow2_get_cluster_type(*cluster_offset); 596 if (s->qcow_version < 3 && (type == QCOW2_CLUSTER_ZERO_PLAIN || 597 type == QCOW2_CLUSTER_ZERO_ALLOC)) { 598 qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found" 599 " in pre-v3 image (L2 offset: %#" PRIx64 600 ", L2 index: %#x)", l2_offset, l2_index); 601 ret = -EIO; 602 goto fail; 603 } 604 switch (type) { 605 case QCOW2_CLUSTER_COMPRESSED: 606 /* Compressed clusters can only be processed one by one */ 607 c = 1; 608 *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK; 609 break; 610 case QCOW2_CLUSTER_ZERO_PLAIN: 611 case QCOW2_CLUSTER_UNALLOCATED: 612 /* how many empty clusters ? */ 613 c = count_contiguous_clusters_unallocated(nb_clusters, 614 &l2_slice[l2_index], type); 615 *cluster_offset = 0; 616 break; 617 case QCOW2_CLUSTER_ZERO_ALLOC: 618 case QCOW2_CLUSTER_NORMAL: 619 /* how many allocated clusters ? */ 620 c = count_contiguous_clusters(nb_clusters, s->cluster_size, 621 &l2_slice[l2_index], QCOW_OFLAG_ZERO); 622 *cluster_offset &= L2E_OFFSET_MASK; 623 if (offset_into_cluster(s, *cluster_offset)) { 624 qcow2_signal_corruption(bs, true, -1, -1, 625 "Cluster allocation offset %#" 626 PRIx64 " unaligned (L2 offset: %#" PRIx64 627 ", L2 index: %#x)", *cluster_offset, 628 l2_offset, l2_index); 629 ret = -EIO; 630 goto fail; 631 } 632 break; 633 default: 634 abort(); 635 } 636 637 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 638 639 bytes_available = (int64_t)c * s->cluster_size; 640 641 out: 642 if (bytes_available > bytes_needed) { 643 bytes_available = bytes_needed; 644 } 645 646 /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster; 647 * subtracting offset_in_cluster will therefore definitely yield something 648 * not exceeding UINT_MAX */ 649 assert(bytes_available - offset_in_cluster <= UINT_MAX); 650 *bytes = bytes_available - offset_in_cluster; 651 652 return type; 653 654 fail: 655 qcow2_cache_put(s->l2_table_cache, (void **)&l2_slice); 656 return ret; 657 } 658 659 /* 660 * get_cluster_table 661 * 662 * for a given disk offset, load (and allocate if needed) 663 * the appropriate slice of its l2 table. 664 * 665 * the cluster index in the l2 slice is given to the caller. 666 * 667 * Returns 0 on success, -errno in failure case 668 */ 669 static int get_cluster_table(BlockDriverState *bs, uint64_t offset, 670 uint64_t **new_l2_slice, 671 int *new_l2_index) 672 { 673 BDRVQcow2State *s = bs->opaque; 674 unsigned int l2_index; 675 uint64_t l1_index, l2_offset; 676 uint64_t *l2_slice = NULL; 677 int ret; 678 679 /* seek to the l2 offset in the l1 table */ 680 681 l1_index = offset_to_l1_index(s, offset); 682 if (l1_index >= s->l1_size) { 683 ret = qcow2_grow_l1_table(bs, l1_index + 1, false); 684 if (ret < 0) { 685 return ret; 686 } 687 } 688 689 assert(l1_index < s->l1_size); 690 l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK; 691 if (offset_into_cluster(s, l2_offset)) { 692 qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64 693 " unaligned (L1 index: %#" PRIx64 ")", 694 l2_offset, l1_index); 695 return -EIO; 696 } 697 698 if (!(s->l1_table[l1_index] & QCOW_OFLAG_COPIED)) { 699 /* First allocate a new L2 table (and do COW if needed) */ 700 ret = l2_allocate(bs, l1_index); 701 if (ret < 0) { 702 return ret; 703 } 704 705 /* Then decrease the refcount of the old table */ 706 if (l2_offset) { 707 qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t), 708 QCOW2_DISCARD_OTHER); 709 } 710 711 /* Get the offset of the newly-allocated l2 table */ 712 l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK; 713 assert(offset_into_cluster(s, l2_offset) == 0); 714 } 715 716 /* load the l2 slice in memory */ 717 ret = l2_load(bs, offset, l2_offset, &l2_slice); 718 if (ret < 0) { 719 return ret; 720 } 721 722 /* find the cluster offset for the given disk offset */ 723 724 l2_index = offset_to_l2_slice_index(s, offset); 725 726 *new_l2_slice = l2_slice; 727 *new_l2_index = l2_index; 728 729 return 0; 730 } 731 732 /* 733 * alloc_compressed_cluster_offset 734 * 735 * For a given offset of the disk image, return cluster offset in 736 * qcow2 file. 737 * 738 * If the offset is not found, allocate a new compressed cluster. 739 * 740 * Return the cluster offset if successful, 741 * Return 0, otherwise. 742 * 743 */ 744 745 uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, 746 uint64_t offset, 747 int compressed_size) 748 { 749 BDRVQcow2State *s = bs->opaque; 750 int l2_index, ret; 751 uint64_t *l2_slice; 752 int64_t cluster_offset; 753 int nb_csectors; 754 755 ret = get_cluster_table(bs, offset, &l2_slice, &l2_index); 756 if (ret < 0) { 757 return 0; 758 } 759 760 /* Compression can't overwrite anything. Fail if the cluster was already 761 * allocated. */ 762 cluster_offset = be64_to_cpu(l2_slice[l2_index]); 763 if (cluster_offset & L2E_OFFSET_MASK) { 764 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 765 return 0; 766 } 767 768 cluster_offset = qcow2_alloc_bytes(bs, compressed_size); 769 if (cluster_offset < 0) { 770 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 771 return 0; 772 } 773 774 nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) - 775 (cluster_offset >> 9); 776 777 cluster_offset |= QCOW_OFLAG_COMPRESSED | 778 ((uint64_t)nb_csectors << s->csize_shift); 779 780 /* update L2 table */ 781 782 /* compressed clusters never have the copied flag */ 783 784 BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED); 785 qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice); 786 l2_slice[l2_index] = cpu_to_be64(cluster_offset); 787 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 788 789 return cluster_offset; 790 } 791 792 static int perform_cow(BlockDriverState *bs, QCowL2Meta *m) 793 { 794 BDRVQcow2State *s = bs->opaque; 795 Qcow2COWRegion *start = &m->cow_start; 796 Qcow2COWRegion *end = &m->cow_end; 797 unsigned buffer_size; 798 unsigned data_bytes = end->offset - (start->offset + start->nb_bytes); 799 bool merge_reads; 800 uint8_t *start_buffer, *end_buffer; 801 QEMUIOVector qiov; 802 int ret; 803 804 assert(start->nb_bytes <= UINT_MAX - end->nb_bytes); 805 assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes); 806 assert(start->offset + start->nb_bytes <= end->offset); 807 assert(!m->data_qiov || m->data_qiov->size == data_bytes); 808 809 if (start->nb_bytes == 0 && end->nb_bytes == 0) { 810 return 0; 811 } 812 813 /* If we have to read both the start and end COW regions and the 814 * middle region is not too large then perform just one read 815 * operation */ 816 merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384; 817 if (merge_reads) { 818 buffer_size = start->nb_bytes + data_bytes + end->nb_bytes; 819 } else { 820 /* If we have to do two reads, add some padding in the middle 821 * if necessary to make sure that the end region is optimally 822 * aligned. */ 823 size_t align = bdrv_opt_mem_align(bs); 824 assert(align > 0 && align <= UINT_MAX); 825 assert(QEMU_ALIGN_UP(start->nb_bytes, align) <= 826 UINT_MAX - end->nb_bytes); 827 buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes; 828 } 829 830 /* Reserve a buffer large enough to store all the data that we're 831 * going to read */ 832 start_buffer = qemu_try_blockalign(bs, buffer_size); 833 if (start_buffer == NULL) { 834 return -ENOMEM; 835 } 836 /* The part of the buffer where the end region is located */ 837 end_buffer = start_buffer + buffer_size - end->nb_bytes; 838 839 qemu_iovec_init(&qiov, 2 + (m->data_qiov ? m->data_qiov->niov : 0)); 840 841 qemu_co_mutex_unlock(&s->lock); 842 /* First we read the existing data from both COW regions. We 843 * either read the whole region in one go, or the start and end 844 * regions separately. */ 845 if (merge_reads) { 846 qemu_iovec_add(&qiov, start_buffer, buffer_size); 847 ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov); 848 } else { 849 qemu_iovec_add(&qiov, start_buffer, start->nb_bytes); 850 ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov); 851 if (ret < 0) { 852 goto fail; 853 } 854 855 qemu_iovec_reset(&qiov); 856 qemu_iovec_add(&qiov, end_buffer, end->nb_bytes); 857 ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov); 858 } 859 if (ret < 0) { 860 goto fail; 861 } 862 863 /* Encrypt the data if necessary before writing it */ 864 if (bs->encrypted) { 865 if (!do_perform_cow_encrypt(bs, m->offset, m->alloc_offset, 866 start->offset, start_buffer, 867 start->nb_bytes) || 868 !do_perform_cow_encrypt(bs, m->offset, m->alloc_offset, 869 end->offset, end_buffer, end->nb_bytes)) { 870 ret = -EIO; 871 goto fail; 872 } 873 } 874 875 /* And now we can write everything. If we have the guest data we 876 * can write everything in one single operation */ 877 if (m->data_qiov) { 878 qemu_iovec_reset(&qiov); 879 if (start->nb_bytes) { 880 qemu_iovec_add(&qiov, start_buffer, start->nb_bytes); 881 } 882 qemu_iovec_concat(&qiov, m->data_qiov, 0, data_bytes); 883 if (end->nb_bytes) { 884 qemu_iovec_add(&qiov, end_buffer, end->nb_bytes); 885 } 886 /* NOTE: we have a write_aio blkdebug event here followed by 887 * a cow_write one in do_perform_cow_write(), but there's only 888 * one single I/O operation */ 889 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); 890 ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov); 891 } else { 892 /* If there's no guest data then write both COW regions separately */ 893 qemu_iovec_reset(&qiov); 894 qemu_iovec_add(&qiov, start_buffer, start->nb_bytes); 895 ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov); 896 if (ret < 0) { 897 goto fail; 898 } 899 900 qemu_iovec_reset(&qiov); 901 qemu_iovec_add(&qiov, end_buffer, end->nb_bytes); 902 ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov); 903 } 904 905 fail: 906 qemu_co_mutex_lock(&s->lock); 907 908 /* 909 * Before we update the L2 table to actually point to the new cluster, we 910 * need to be sure that the refcounts have been increased and COW was 911 * handled. 912 */ 913 if (ret == 0) { 914 qcow2_cache_depends_on_flush(s->l2_table_cache); 915 } 916 917 qemu_vfree(start_buffer); 918 qemu_iovec_destroy(&qiov); 919 return ret; 920 } 921 922 int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m) 923 { 924 BDRVQcow2State *s = bs->opaque; 925 int i, j = 0, l2_index, ret; 926 uint64_t *old_cluster, *l2_slice; 927 uint64_t cluster_offset = m->alloc_offset; 928 929 trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters); 930 assert(m->nb_clusters > 0); 931 932 old_cluster = g_try_new(uint64_t, m->nb_clusters); 933 if (old_cluster == NULL) { 934 ret = -ENOMEM; 935 goto err; 936 } 937 938 /* copy content of unmodified sectors */ 939 ret = perform_cow(bs, m); 940 if (ret < 0) { 941 goto err; 942 } 943 944 /* Update L2 table. */ 945 if (s->use_lazy_refcounts) { 946 qcow2_mark_dirty(bs); 947 } 948 if (qcow2_need_accurate_refcounts(s)) { 949 qcow2_cache_set_dependency(bs, s->l2_table_cache, 950 s->refcount_block_cache); 951 } 952 953 ret = get_cluster_table(bs, m->offset, &l2_slice, &l2_index); 954 if (ret < 0) { 955 goto err; 956 } 957 qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice); 958 959 assert(l2_index + m->nb_clusters <= s->l2_slice_size); 960 for (i = 0; i < m->nb_clusters; i++) { 961 /* if two concurrent writes happen to the same unallocated cluster 962 * each write allocates separate cluster and writes data concurrently. 963 * The first one to complete updates l2 table with pointer to its 964 * cluster the second one has to do RMW (which is done above by 965 * perform_cow()), update l2 table with its cluster pointer and free 966 * old cluster. This is what this loop does */ 967 if (l2_slice[l2_index + i] != 0) { 968 old_cluster[j++] = l2_slice[l2_index + i]; 969 } 970 971 l2_slice[l2_index + i] = cpu_to_be64((cluster_offset + 972 (i << s->cluster_bits)) | QCOW_OFLAG_COPIED); 973 } 974 975 976 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 977 978 /* 979 * If this was a COW, we need to decrease the refcount of the old cluster. 980 * 981 * Don't discard clusters that reach a refcount of 0 (e.g. compressed 982 * clusters), the next write will reuse them anyway. 983 */ 984 if (!m->keep_old_clusters && j != 0) { 985 for (i = 0; i < j; i++) { 986 qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1, 987 QCOW2_DISCARD_NEVER); 988 } 989 } 990 991 ret = 0; 992 err: 993 g_free(old_cluster); 994 return ret; 995 } 996 997 /* 998 * Returns the number of contiguous clusters that can be used for an allocating 999 * write, but require COW to be performed (this includes yet unallocated space, 1000 * which must copy from the backing file) 1001 */ 1002 static int count_cow_clusters(BDRVQcow2State *s, int nb_clusters, 1003 uint64_t *l2_slice, int l2_index) 1004 { 1005 int i; 1006 1007 for (i = 0; i < nb_clusters; i++) { 1008 uint64_t l2_entry = be64_to_cpu(l2_slice[l2_index + i]); 1009 QCow2ClusterType cluster_type = qcow2_get_cluster_type(l2_entry); 1010 1011 switch(cluster_type) { 1012 case QCOW2_CLUSTER_NORMAL: 1013 if (l2_entry & QCOW_OFLAG_COPIED) { 1014 goto out; 1015 } 1016 break; 1017 case QCOW2_CLUSTER_UNALLOCATED: 1018 case QCOW2_CLUSTER_COMPRESSED: 1019 case QCOW2_CLUSTER_ZERO_PLAIN: 1020 case QCOW2_CLUSTER_ZERO_ALLOC: 1021 break; 1022 default: 1023 abort(); 1024 } 1025 } 1026 1027 out: 1028 assert(i <= nb_clusters); 1029 return i; 1030 } 1031 1032 /* 1033 * Check if there already is an AIO write request in flight which allocates 1034 * the same cluster. In this case we need to wait until the previous 1035 * request has completed and updated the L2 table accordingly. 1036 * 1037 * Returns: 1038 * 0 if there was no dependency. *cur_bytes indicates the number of 1039 * bytes from guest_offset that can be read before the next 1040 * dependency must be processed (or the request is complete) 1041 * 1042 * -EAGAIN if we had to wait for another request, previously gathered 1043 * information on cluster allocation may be invalid now. The caller 1044 * must start over anyway, so consider *cur_bytes undefined. 1045 */ 1046 static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset, 1047 uint64_t *cur_bytes, QCowL2Meta **m) 1048 { 1049 BDRVQcow2State *s = bs->opaque; 1050 QCowL2Meta *old_alloc; 1051 uint64_t bytes = *cur_bytes; 1052 1053 QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) { 1054 1055 uint64_t start = guest_offset; 1056 uint64_t end = start + bytes; 1057 uint64_t old_start = l2meta_cow_start(old_alloc); 1058 uint64_t old_end = l2meta_cow_end(old_alloc); 1059 1060 if (end <= old_start || start >= old_end) { 1061 /* No intersection */ 1062 } else { 1063 if (start < old_start) { 1064 /* Stop at the start of a running allocation */ 1065 bytes = old_start - start; 1066 } else { 1067 bytes = 0; 1068 } 1069 1070 /* Stop if already an l2meta exists. After yielding, it wouldn't 1071 * be valid any more, so we'd have to clean up the old L2Metas 1072 * and deal with requests depending on them before starting to 1073 * gather new ones. Not worth the trouble. */ 1074 if (bytes == 0 && *m) { 1075 *cur_bytes = 0; 1076 return 0; 1077 } 1078 1079 if (bytes == 0) { 1080 /* Wait for the dependency to complete. We need to recheck 1081 * the free/allocated clusters when we continue. */ 1082 qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock); 1083 return -EAGAIN; 1084 } 1085 } 1086 } 1087 1088 /* Make sure that existing clusters and new allocations are only used up to 1089 * the next dependency if we shortened the request above */ 1090 *cur_bytes = bytes; 1091 1092 return 0; 1093 } 1094 1095 /* 1096 * Checks how many already allocated clusters that don't require a copy on 1097 * write there are at the given guest_offset (up to *bytes). If 1098 * *host_offset is not zero, only physically contiguous clusters beginning at 1099 * this host offset are counted. 1100 * 1101 * Note that guest_offset may not be cluster aligned. In this case, the 1102 * returned *host_offset points to exact byte referenced by guest_offset and 1103 * therefore isn't cluster aligned as well. 1104 * 1105 * Returns: 1106 * 0: if no allocated clusters are available at the given offset. 1107 * *bytes is normally unchanged. It is set to 0 if the cluster 1108 * is allocated and doesn't need COW, but doesn't have the right 1109 * physical offset. 1110 * 1111 * 1: if allocated clusters that don't require a COW are available at 1112 * the requested offset. *bytes may have decreased and describes 1113 * the length of the area that can be written to. 1114 * 1115 * -errno: in error cases 1116 */ 1117 static int handle_copied(BlockDriverState *bs, uint64_t guest_offset, 1118 uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m) 1119 { 1120 BDRVQcow2State *s = bs->opaque; 1121 int l2_index; 1122 uint64_t cluster_offset; 1123 uint64_t *l2_slice; 1124 uint64_t nb_clusters; 1125 unsigned int keep_clusters; 1126 int ret; 1127 1128 trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset, 1129 *bytes); 1130 1131 assert(*host_offset == 0 || offset_into_cluster(s, guest_offset) 1132 == offset_into_cluster(s, *host_offset)); 1133 1134 /* 1135 * Calculate the number of clusters to look for. We stop at L2 slice 1136 * boundaries to keep things simple. 1137 */ 1138 nb_clusters = 1139 size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes); 1140 1141 l2_index = offset_to_l2_slice_index(s, guest_offset); 1142 nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index); 1143 assert(nb_clusters <= INT_MAX); 1144 1145 /* Find L2 entry for the first involved cluster */ 1146 ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index); 1147 if (ret < 0) { 1148 return ret; 1149 } 1150 1151 cluster_offset = be64_to_cpu(l2_slice[l2_index]); 1152 1153 /* Check how many clusters are already allocated and don't need COW */ 1154 if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL 1155 && (cluster_offset & QCOW_OFLAG_COPIED)) 1156 { 1157 /* If a specific host_offset is required, check it */ 1158 bool offset_matches = 1159 (cluster_offset & L2E_OFFSET_MASK) == *host_offset; 1160 1161 if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) { 1162 qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset " 1163 "%#llx unaligned (guest offset: %#" PRIx64 1164 ")", cluster_offset & L2E_OFFSET_MASK, 1165 guest_offset); 1166 ret = -EIO; 1167 goto out; 1168 } 1169 1170 if (*host_offset != 0 && !offset_matches) { 1171 *bytes = 0; 1172 ret = 0; 1173 goto out; 1174 } 1175 1176 /* We keep all QCOW_OFLAG_COPIED clusters */ 1177 keep_clusters = 1178 count_contiguous_clusters(nb_clusters, s->cluster_size, 1179 &l2_slice[l2_index], 1180 QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO); 1181 assert(keep_clusters <= nb_clusters); 1182 1183 *bytes = MIN(*bytes, 1184 keep_clusters * s->cluster_size 1185 - offset_into_cluster(s, guest_offset)); 1186 1187 ret = 1; 1188 } else { 1189 ret = 0; 1190 } 1191 1192 /* Cleanup */ 1193 out: 1194 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 1195 1196 /* Only return a host offset if we actually made progress. Otherwise we 1197 * would make requirements for handle_alloc() that it can't fulfill */ 1198 if (ret > 0) { 1199 *host_offset = (cluster_offset & L2E_OFFSET_MASK) 1200 + offset_into_cluster(s, guest_offset); 1201 } 1202 1203 return ret; 1204 } 1205 1206 /* 1207 * Allocates new clusters for the given guest_offset. 1208 * 1209 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to 1210 * contain the number of clusters that have been allocated and are contiguous 1211 * in the image file. 1212 * 1213 * If *host_offset is non-zero, it specifies the offset in the image file at 1214 * which the new clusters must start. *nb_clusters can be 0 on return in this 1215 * case if the cluster at host_offset is already in use. If *host_offset is 1216 * zero, the clusters can be allocated anywhere in the image file. 1217 * 1218 * *host_offset is updated to contain the offset into the image file at which 1219 * the first allocated cluster starts. 1220 * 1221 * Return 0 on success and -errno in error cases. -EAGAIN means that the 1222 * function has been waiting for another request and the allocation must be 1223 * restarted, but the whole request should not be failed. 1224 */ 1225 static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset, 1226 uint64_t *host_offset, uint64_t *nb_clusters) 1227 { 1228 BDRVQcow2State *s = bs->opaque; 1229 1230 trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset, 1231 *host_offset, *nb_clusters); 1232 1233 /* Allocate new clusters */ 1234 trace_qcow2_cluster_alloc_phys(qemu_coroutine_self()); 1235 if (*host_offset == 0) { 1236 int64_t cluster_offset = 1237 qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size); 1238 if (cluster_offset < 0) { 1239 return cluster_offset; 1240 } 1241 *host_offset = cluster_offset; 1242 return 0; 1243 } else { 1244 int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters); 1245 if (ret < 0) { 1246 return ret; 1247 } 1248 *nb_clusters = ret; 1249 return 0; 1250 } 1251 } 1252 1253 /* 1254 * Allocates new clusters for an area that either is yet unallocated or needs a 1255 * copy on write. If *host_offset is non-zero, clusters are only allocated if 1256 * the new allocation can match the specified host offset. 1257 * 1258 * Note that guest_offset may not be cluster aligned. In this case, the 1259 * returned *host_offset points to exact byte referenced by guest_offset and 1260 * therefore isn't cluster aligned as well. 1261 * 1262 * Returns: 1263 * 0: if no clusters could be allocated. *bytes is set to 0, 1264 * *host_offset is left unchanged. 1265 * 1266 * 1: if new clusters were allocated. *bytes may be decreased if the 1267 * new allocation doesn't cover all of the requested area. 1268 * *host_offset is updated to contain the host offset of the first 1269 * newly allocated cluster. 1270 * 1271 * -errno: in error cases 1272 */ 1273 static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset, 1274 uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m) 1275 { 1276 BDRVQcow2State *s = bs->opaque; 1277 int l2_index; 1278 uint64_t *l2_slice; 1279 uint64_t entry; 1280 uint64_t nb_clusters; 1281 int ret; 1282 bool keep_old_clusters = false; 1283 1284 uint64_t alloc_cluster_offset = 0; 1285 1286 trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset, 1287 *bytes); 1288 assert(*bytes > 0); 1289 1290 /* 1291 * Calculate the number of clusters to look for. We stop at L2 slice 1292 * boundaries to keep things simple. 1293 */ 1294 nb_clusters = 1295 size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes); 1296 1297 l2_index = offset_to_l2_slice_index(s, guest_offset); 1298 nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index); 1299 assert(nb_clusters <= INT_MAX); 1300 1301 /* Find L2 entry for the first involved cluster */ 1302 ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index); 1303 if (ret < 0) { 1304 return ret; 1305 } 1306 1307 entry = be64_to_cpu(l2_slice[l2_index]); 1308 1309 /* For the moment, overwrite compressed clusters one by one */ 1310 if (entry & QCOW_OFLAG_COMPRESSED) { 1311 nb_clusters = 1; 1312 } else { 1313 nb_clusters = count_cow_clusters(s, nb_clusters, l2_slice, l2_index); 1314 } 1315 1316 /* This function is only called when there were no non-COW clusters, so if 1317 * we can't find any unallocated or COW clusters either, something is 1318 * wrong with our code. */ 1319 assert(nb_clusters > 0); 1320 1321 if (qcow2_get_cluster_type(entry) == QCOW2_CLUSTER_ZERO_ALLOC && 1322 (entry & QCOW_OFLAG_COPIED) && 1323 (!*host_offset || 1324 start_of_cluster(s, *host_offset) == (entry & L2E_OFFSET_MASK))) 1325 { 1326 int preallocated_nb_clusters; 1327 1328 if (offset_into_cluster(s, entry & L2E_OFFSET_MASK)) { 1329 qcow2_signal_corruption(bs, true, -1, -1, "Preallocated zero " 1330 "cluster offset %#llx unaligned (guest " 1331 "offset: %#" PRIx64 ")", 1332 entry & L2E_OFFSET_MASK, guest_offset); 1333 ret = -EIO; 1334 goto fail; 1335 } 1336 1337 /* Try to reuse preallocated zero clusters; contiguous normal clusters 1338 * would be fine, too, but count_cow_clusters() above has limited 1339 * nb_clusters already to a range of COW clusters */ 1340 preallocated_nb_clusters = 1341 count_contiguous_clusters(nb_clusters, s->cluster_size, 1342 &l2_slice[l2_index], QCOW_OFLAG_COPIED); 1343 assert(preallocated_nb_clusters > 0); 1344 1345 nb_clusters = preallocated_nb_clusters; 1346 alloc_cluster_offset = entry & L2E_OFFSET_MASK; 1347 1348 /* We want to reuse these clusters, so qcow2_alloc_cluster_link_l2() 1349 * should not free them. */ 1350 keep_old_clusters = true; 1351 } 1352 1353 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 1354 1355 if (!alloc_cluster_offset) { 1356 /* Allocate, if necessary at a given offset in the image file */ 1357 alloc_cluster_offset = start_of_cluster(s, *host_offset); 1358 ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset, 1359 &nb_clusters); 1360 if (ret < 0) { 1361 goto fail; 1362 } 1363 1364 /* Can't extend contiguous allocation */ 1365 if (nb_clusters == 0) { 1366 *bytes = 0; 1367 return 0; 1368 } 1369 1370 /* !*host_offset would overwrite the image header and is reserved for 1371 * "no host offset preferred". If 0 was a valid host offset, it'd 1372 * trigger the following overlap check; do that now to avoid having an 1373 * invalid value in *host_offset. */ 1374 if (!alloc_cluster_offset) { 1375 ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset, 1376 nb_clusters * s->cluster_size); 1377 assert(ret < 0); 1378 goto fail; 1379 } 1380 } 1381 1382 /* 1383 * Save info needed for meta data update. 1384 * 1385 * requested_bytes: Number of bytes from the start of the first 1386 * newly allocated cluster to the end of the (possibly shortened 1387 * before) write request. 1388 * 1389 * avail_bytes: Number of bytes from the start of the first 1390 * newly allocated to the end of the last newly allocated cluster. 1391 * 1392 * nb_bytes: The number of bytes from the start of the first 1393 * newly allocated cluster to the end of the area that the write 1394 * request actually writes to (excluding COW at the end) 1395 */ 1396 uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset); 1397 int avail_bytes = MIN(INT_MAX, nb_clusters << s->cluster_bits); 1398 int nb_bytes = MIN(requested_bytes, avail_bytes); 1399 QCowL2Meta *old_m = *m; 1400 1401 *m = g_malloc0(sizeof(**m)); 1402 1403 **m = (QCowL2Meta) { 1404 .next = old_m, 1405 1406 .alloc_offset = alloc_cluster_offset, 1407 .offset = start_of_cluster(s, guest_offset), 1408 .nb_clusters = nb_clusters, 1409 1410 .keep_old_clusters = keep_old_clusters, 1411 1412 .cow_start = { 1413 .offset = 0, 1414 .nb_bytes = offset_into_cluster(s, guest_offset), 1415 }, 1416 .cow_end = { 1417 .offset = nb_bytes, 1418 .nb_bytes = avail_bytes - nb_bytes, 1419 }, 1420 }; 1421 qemu_co_queue_init(&(*m)->dependent_requests); 1422 QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight); 1423 1424 *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset); 1425 *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset)); 1426 assert(*bytes != 0); 1427 1428 return 1; 1429 1430 fail: 1431 if (*m && (*m)->nb_clusters > 0) { 1432 QLIST_REMOVE(*m, next_in_flight); 1433 } 1434 return ret; 1435 } 1436 1437 /* 1438 * alloc_cluster_offset 1439 * 1440 * For a given offset on the virtual disk, find the cluster offset in qcow2 1441 * file. If the offset is not found, allocate a new cluster. 1442 * 1443 * If the cluster was already allocated, m->nb_clusters is set to 0 and 1444 * other fields in m are meaningless. 1445 * 1446 * If the cluster is newly allocated, m->nb_clusters is set to the number of 1447 * contiguous clusters that have been allocated. In this case, the other 1448 * fields of m are valid and contain information about the first allocated 1449 * cluster. 1450 * 1451 * If the request conflicts with another write request in flight, the coroutine 1452 * is queued and will be reentered when the dependency has completed. 1453 * 1454 * Return 0 on success and -errno in error cases 1455 */ 1456 int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset, 1457 unsigned int *bytes, uint64_t *host_offset, 1458 QCowL2Meta **m) 1459 { 1460 BDRVQcow2State *s = bs->opaque; 1461 uint64_t start, remaining; 1462 uint64_t cluster_offset; 1463 uint64_t cur_bytes; 1464 int ret; 1465 1466 trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes); 1467 1468 again: 1469 start = offset; 1470 remaining = *bytes; 1471 cluster_offset = 0; 1472 *host_offset = 0; 1473 cur_bytes = 0; 1474 *m = NULL; 1475 1476 while (true) { 1477 1478 if (!*host_offset) { 1479 *host_offset = start_of_cluster(s, cluster_offset); 1480 } 1481 1482 assert(remaining >= cur_bytes); 1483 1484 start += cur_bytes; 1485 remaining -= cur_bytes; 1486 cluster_offset += cur_bytes; 1487 1488 if (remaining == 0) { 1489 break; 1490 } 1491 1492 cur_bytes = remaining; 1493 1494 /* 1495 * Now start gathering as many contiguous clusters as possible: 1496 * 1497 * 1. Check for overlaps with in-flight allocations 1498 * 1499 * a) Overlap not in the first cluster -> shorten this request and 1500 * let the caller handle the rest in its next loop iteration. 1501 * 1502 * b) Real overlaps of two requests. Yield and restart the search 1503 * for contiguous clusters (the situation could have changed 1504 * while we were sleeping) 1505 * 1506 * c) TODO: Request starts in the same cluster as the in-flight 1507 * allocation ends. Shorten the COW of the in-fight allocation, 1508 * set cluster_offset to write to the same cluster and set up 1509 * the right synchronisation between the in-flight request and 1510 * the new one. 1511 */ 1512 ret = handle_dependencies(bs, start, &cur_bytes, m); 1513 if (ret == -EAGAIN) { 1514 /* Currently handle_dependencies() doesn't yield if we already had 1515 * an allocation. If it did, we would have to clean up the L2Meta 1516 * structs before starting over. */ 1517 assert(*m == NULL); 1518 goto again; 1519 } else if (ret < 0) { 1520 return ret; 1521 } else if (cur_bytes == 0) { 1522 break; 1523 } else { 1524 /* handle_dependencies() may have decreased cur_bytes (shortened 1525 * the allocations below) so that the next dependency is processed 1526 * correctly during the next loop iteration. */ 1527 } 1528 1529 /* 1530 * 2. Count contiguous COPIED clusters. 1531 */ 1532 ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m); 1533 if (ret < 0) { 1534 return ret; 1535 } else if (ret) { 1536 continue; 1537 } else if (cur_bytes == 0) { 1538 break; 1539 } 1540 1541 /* 1542 * 3. If the request still hasn't completed, allocate new clusters, 1543 * considering any cluster_offset of steps 1c or 2. 1544 */ 1545 ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m); 1546 if (ret < 0) { 1547 return ret; 1548 } else if (ret) { 1549 continue; 1550 } else { 1551 assert(cur_bytes == 0); 1552 break; 1553 } 1554 } 1555 1556 *bytes -= remaining; 1557 assert(*bytes > 0); 1558 assert(*host_offset != 0); 1559 1560 return 0; 1561 } 1562 1563 static int decompress_buffer(uint8_t *out_buf, int out_buf_size, 1564 const uint8_t *buf, int buf_size) 1565 { 1566 z_stream strm1, *strm = &strm1; 1567 int ret, out_len; 1568 1569 memset(strm, 0, sizeof(*strm)); 1570 1571 strm->next_in = (uint8_t *)buf; 1572 strm->avail_in = buf_size; 1573 strm->next_out = out_buf; 1574 strm->avail_out = out_buf_size; 1575 1576 ret = inflateInit2(strm, -12); 1577 if (ret != Z_OK) 1578 return -1; 1579 ret = inflate(strm, Z_FINISH); 1580 out_len = strm->next_out - out_buf; 1581 if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) || 1582 out_len != out_buf_size) { 1583 inflateEnd(strm); 1584 return -1; 1585 } 1586 inflateEnd(strm); 1587 return 0; 1588 } 1589 1590 int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset) 1591 { 1592 BDRVQcow2State *s = bs->opaque; 1593 int ret, csize, nb_csectors, sector_offset; 1594 uint64_t coffset; 1595 1596 coffset = cluster_offset & s->cluster_offset_mask; 1597 if (s->cluster_cache_offset != coffset) { 1598 nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1; 1599 sector_offset = coffset & 511; 1600 csize = nb_csectors * 512 - sector_offset; 1601 1602 /* Allocate buffers on first decompress operation, most images are 1603 * uncompressed and the memory overhead can be avoided. The buffers 1604 * are freed in .bdrv_close(). 1605 */ 1606 if (!s->cluster_data) { 1607 /* one more sector for decompressed data alignment */ 1608 s->cluster_data = qemu_try_blockalign(bs->file->bs, 1609 QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size + 512); 1610 if (!s->cluster_data) { 1611 return -ENOMEM; 1612 } 1613 } 1614 if (!s->cluster_cache) { 1615 s->cluster_cache = g_malloc(s->cluster_size); 1616 } 1617 1618 BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED); 1619 ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, 1620 nb_csectors); 1621 if (ret < 0) { 1622 return ret; 1623 } 1624 if (decompress_buffer(s->cluster_cache, s->cluster_size, 1625 s->cluster_data + sector_offset, csize) < 0) { 1626 return -EIO; 1627 } 1628 s->cluster_cache_offset = coffset; 1629 } 1630 return 0; 1631 } 1632 1633 /* 1634 * This discards as many clusters of nb_clusters as possible at once (i.e. 1635 * all clusters in the same L2 slice) and returns the number of discarded 1636 * clusters. 1637 */ 1638 static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset, 1639 uint64_t nb_clusters, 1640 enum qcow2_discard_type type, bool full_discard) 1641 { 1642 BDRVQcow2State *s = bs->opaque; 1643 uint64_t *l2_slice; 1644 int l2_index; 1645 int ret; 1646 int i; 1647 1648 ret = get_cluster_table(bs, offset, &l2_slice, &l2_index); 1649 if (ret < 0) { 1650 return ret; 1651 } 1652 1653 /* Limit nb_clusters to one L2 slice */ 1654 nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index); 1655 assert(nb_clusters <= INT_MAX); 1656 1657 for (i = 0; i < nb_clusters; i++) { 1658 uint64_t old_l2_entry; 1659 1660 old_l2_entry = be64_to_cpu(l2_slice[l2_index + i]); 1661 1662 /* 1663 * If full_discard is false, make sure that a discarded area reads back 1664 * as zeroes for v3 images (we cannot do it for v2 without actually 1665 * writing a zero-filled buffer). We can skip the operation if the 1666 * cluster is already marked as zero, or if it's unallocated and we 1667 * don't have a backing file. 1668 * 1669 * TODO We might want to use bdrv_block_status(bs) here, but we're 1670 * holding s->lock, so that doesn't work today. 1671 * 1672 * If full_discard is true, the sector should not read back as zeroes, 1673 * but rather fall through to the backing file. 1674 */ 1675 switch (qcow2_get_cluster_type(old_l2_entry)) { 1676 case QCOW2_CLUSTER_UNALLOCATED: 1677 if (full_discard || !bs->backing) { 1678 continue; 1679 } 1680 break; 1681 1682 case QCOW2_CLUSTER_ZERO_PLAIN: 1683 if (!full_discard) { 1684 continue; 1685 } 1686 break; 1687 1688 case QCOW2_CLUSTER_ZERO_ALLOC: 1689 case QCOW2_CLUSTER_NORMAL: 1690 case QCOW2_CLUSTER_COMPRESSED: 1691 break; 1692 1693 default: 1694 abort(); 1695 } 1696 1697 /* First remove L2 entries */ 1698 qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice); 1699 if (!full_discard && s->qcow_version >= 3) { 1700 l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO); 1701 } else { 1702 l2_slice[l2_index + i] = cpu_to_be64(0); 1703 } 1704 1705 /* Then decrease the refcount */ 1706 qcow2_free_any_clusters(bs, old_l2_entry, 1, type); 1707 } 1708 1709 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 1710 1711 return nb_clusters; 1712 } 1713 1714 int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset, 1715 uint64_t bytes, enum qcow2_discard_type type, 1716 bool full_discard) 1717 { 1718 BDRVQcow2State *s = bs->opaque; 1719 uint64_t end_offset = offset + bytes; 1720 uint64_t nb_clusters; 1721 int64_t cleared; 1722 int ret; 1723 1724 /* Caller must pass aligned values, except at image end */ 1725 assert(QEMU_IS_ALIGNED(offset, s->cluster_size)); 1726 assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) || 1727 end_offset == bs->total_sectors << BDRV_SECTOR_BITS); 1728 1729 nb_clusters = size_to_clusters(s, bytes); 1730 1731 s->cache_discards = true; 1732 1733 /* Each L2 slice is handled by its own loop iteration */ 1734 while (nb_clusters > 0) { 1735 cleared = discard_in_l2_slice(bs, offset, nb_clusters, type, 1736 full_discard); 1737 if (cleared < 0) { 1738 ret = cleared; 1739 goto fail; 1740 } 1741 1742 nb_clusters -= cleared; 1743 offset += (cleared * s->cluster_size); 1744 } 1745 1746 ret = 0; 1747 fail: 1748 s->cache_discards = false; 1749 qcow2_process_discards(bs, ret); 1750 1751 return ret; 1752 } 1753 1754 /* 1755 * This zeroes as many clusters of nb_clusters as possible at once (i.e. 1756 * all clusters in the same L2 slice) and returns the number of zeroed 1757 * clusters. 1758 */ 1759 static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset, 1760 uint64_t nb_clusters, int flags) 1761 { 1762 BDRVQcow2State *s = bs->opaque; 1763 uint64_t *l2_slice; 1764 int l2_index; 1765 int ret; 1766 int i; 1767 bool unmap = !!(flags & BDRV_REQ_MAY_UNMAP); 1768 1769 ret = get_cluster_table(bs, offset, &l2_slice, &l2_index); 1770 if (ret < 0) { 1771 return ret; 1772 } 1773 1774 /* Limit nb_clusters to one L2 slice */ 1775 nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index); 1776 assert(nb_clusters <= INT_MAX); 1777 1778 for (i = 0; i < nb_clusters; i++) { 1779 uint64_t old_offset; 1780 QCow2ClusterType cluster_type; 1781 1782 old_offset = be64_to_cpu(l2_slice[l2_index + i]); 1783 1784 /* 1785 * Minimize L2 changes if the cluster already reads back as 1786 * zeroes with correct allocation. 1787 */ 1788 cluster_type = qcow2_get_cluster_type(old_offset); 1789 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN || 1790 (cluster_type == QCOW2_CLUSTER_ZERO_ALLOC && !unmap)) { 1791 continue; 1792 } 1793 1794 qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice); 1795 if (cluster_type == QCOW2_CLUSTER_COMPRESSED || unmap) { 1796 l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO); 1797 qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST); 1798 } else { 1799 l2_slice[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO); 1800 } 1801 } 1802 1803 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 1804 1805 return nb_clusters; 1806 } 1807 1808 int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset, 1809 uint64_t bytes, int flags) 1810 { 1811 BDRVQcow2State *s = bs->opaque; 1812 uint64_t end_offset = offset + bytes; 1813 uint64_t nb_clusters; 1814 int64_t cleared; 1815 int ret; 1816 1817 /* Caller must pass aligned values, except at image end */ 1818 assert(QEMU_IS_ALIGNED(offset, s->cluster_size)); 1819 assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) || 1820 end_offset == bs->total_sectors << BDRV_SECTOR_BITS); 1821 1822 /* The zero flag is only supported by version 3 and newer */ 1823 if (s->qcow_version < 3) { 1824 return -ENOTSUP; 1825 } 1826 1827 /* Each L2 slice is handled by its own loop iteration */ 1828 nb_clusters = size_to_clusters(s, bytes); 1829 1830 s->cache_discards = true; 1831 1832 while (nb_clusters > 0) { 1833 cleared = zero_in_l2_slice(bs, offset, nb_clusters, flags); 1834 if (cleared < 0) { 1835 ret = cleared; 1836 goto fail; 1837 } 1838 1839 nb_clusters -= cleared; 1840 offset += (cleared * s->cluster_size); 1841 } 1842 1843 ret = 0; 1844 fail: 1845 s->cache_discards = false; 1846 qcow2_process_discards(bs, ret); 1847 1848 return ret; 1849 } 1850 1851 /* 1852 * Expands all zero clusters in a specific L1 table (or deallocates them, for 1853 * non-backed non-pre-allocated zero clusters). 1854 * 1855 * l1_entries and *visited_l1_entries are used to keep track of progress for 1856 * status_cb(). l1_entries contains the total number of L1 entries and 1857 * *visited_l1_entries counts all visited L1 entries. 1858 */ 1859 static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table, 1860 int l1_size, int64_t *visited_l1_entries, 1861 int64_t l1_entries, 1862 BlockDriverAmendStatusCB *status_cb, 1863 void *cb_opaque) 1864 { 1865 BDRVQcow2State *s = bs->opaque; 1866 bool is_active_l1 = (l1_table == s->l1_table); 1867 uint64_t *l2_slice = NULL; 1868 unsigned slice, slice_size2, n_slices; 1869 int ret; 1870 int i, j; 1871 1872 slice_size2 = s->l2_slice_size * sizeof(uint64_t); 1873 n_slices = s->cluster_size / slice_size2; 1874 1875 if (!is_active_l1) { 1876 /* inactive L2 tables require a buffer to be stored in when loading 1877 * them from disk */ 1878 l2_slice = qemu_try_blockalign(bs->file->bs, slice_size2); 1879 if (l2_slice == NULL) { 1880 return -ENOMEM; 1881 } 1882 } 1883 1884 for (i = 0; i < l1_size; i++) { 1885 uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK; 1886 uint64_t l2_refcount; 1887 1888 if (!l2_offset) { 1889 /* unallocated */ 1890 (*visited_l1_entries)++; 1891 if (status_cb) { 1892 status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque); 1893 } 1894 continue; 1895 } 1896 1897 if (offset_into_cluster(s, l2_offset)) { 1898 qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" 1899 PRIx64 " unaligned (L1 index: %#x)", 1900 l2_offset, i); 1901 ret = -EIO; 1902 goto fail; 1903 } 1904 1905 ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits, 1906 &l2_refcount); 1907 if (ret < 0) { 1908 goto fail; 1909 } 1910 1911 for (slice = 0; slice < n_slices; slice++) { 1912 uint64_t slice_offset = l2_offset + slice * slice_size2; 1913 bool l2_dirty = false; 1914 if (is_active_l1) { 1915 /* get active L2 tables from cache */ 1916 ret = qcow2_cache_get(bs, s->l2_table_cache, slice_offset, 1917 (void **)&l2_slice); 1918 } else { 1919 /* load inactive L2 tables from disk */ 1920 ret = bdrv_pread(bs->file, slice_offset, l2_slice, slice_size2); 1921 } 1922 if (ret < 0) { 1923 goto fail; 1924 } 1925 1926 for (j = 0; j < s->l2_slice_size; j++) { 1927 uint64_t l2_entry = be64_to_cpu(l2_slice[j]); 1928 int64_t offset = l2_entry & L2E_OFFSET_MASK; 1929 QCow2ClusterType cluster_type = 1930 qcow2_get_cluster_type(l2_entry); 1931 1932 if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN && 1933 cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) { 1934 continue; 1935 } 1936 1937 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) { 1938 if (!bs->backing) { 1939 /* not backed; therefore we can simply deallocate the 1940 * cluster */ 1941 l2_slice[j] = 0; 1942 l2_dirty = true; 1943 continue; 1944 } 1945 1946 offset = qcow2_alloc_clusters(bs, s->cluster_size); 1947 if (offset < 0) { 1948 ret = offset; 1949 goto fail; 1950 } 1951 1952 if (l2_refcount > 1) { 1953 /* For shared L2 tables, set the refcount accordingly 1954 * (it is already 1 and needs to be l2_refcount) */ 1955 ret = qcow2_update_cluster_refcount( 1956 bs, offset >> s->cluster_bits, 1957 refcount_diff(1, l2_refcount), false, 1958 QCOW2_DISCARD_OTHER); 1959 if (ret < 0) { 1960 qcow2_free_clusters(bs, offset, s->cluster_size, 1961 QCOW2_DISCARD_OTHER); 1962 goto fail; 1963 } 1964 } 1965 } 1966 1967 if (offset_into_cluster(s, offset)) { 1968 int l2_index = slice * s->l2_slice_size + j; 1969 qcow2_signal_corruption( 1970 bs, true, -1, -1, 1971 "Cluster allocation offset " 1972 "%#" PRIx64 " unaligned (L2 offset: %#" 1973 PRIx64 ", L2 index: %#x)", offset, 1974 l2_offset, l2_index); 1975 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) { 1976 qcow2_free_clusters(bs, offset, s->cluster_size, 1977 QCOW2_DISCARD_ALWAYS); 1978 } 1979 ret = -EIO; 1980 goto fail; 1981 } 1982 1983 ret = qcow2_pre_write_overlap_check(bs, 0, offset, 1984 s->cluster_size); 1985 if (ret < 0) { 1986 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) { 1987 qcow2_free_clusters(bs, offset, s->cluster_size, 1988 QCOW2_DISCARD_ALWAYS); 1989 } 1990 goto fail; 1991 } 1992 1993 ret = bdrv_pwrite_zeroes(bs->file, offset, s->cluster_size, 0); 1994 if (ret < 0) { 1995 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) { 1996 qcow2_free_clusters(bs, offset, s->cluster_size, 1997 QCOW2_DISCARD_ALWAYS); 1998 } 1999 goto fail; 2000 } 2001 2002 if (l2_refcount == 1) { 2003 l2_slice[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED); 2004 } else { 2005 l2_slice[j] = cpu_to_be64(offset); 2006 } 2007 l2_dirty = true; 2008 } 2009 2010 if (is_active_l1) { 2011 if (l2_dirty) { 2012 qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice); 2013 qcow2_cache_depends_on_flush(s->l2_table_cache); 2014 } 2015 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 2016 } else { 2017 if (l2_dirty) { 2018 ret = qcow2_pre_write_overlap_check( 2019 bs, QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, 2020 slice_offset, slice_size2); 2021 if (ret < 0) { 2022 goto fail; 2023 } 2024 2025 ret = bdrv_pwrite(bs->file, slice_offset, 2026 l2_slice, slice_size2); 2027 if (ret < 0) { 2028 goto fail; 2029 } 2030 } 2031 } 2032 } 2033 2034 (*visited_l1_entries)++; 2035 if (status_cb) { 2036 status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque); 2037 } 2038 } 2039 2040 ret = 0; 2041 2042 fail: 2043 if (l2_slice) { 2044 if (!is_active_l1) { 2045 qemu_vfree(l2_slice); 2046 } else { 2047 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 2048 } 2049 } 2050 return ret; 2051 } 2052 2053 /* 2054 * For backed images, expands all zero clusters on the image. For non-backed 2055 * images, deallocates all non-pre-allocated zero clusters (and claims the 2056 * allocation for pre-allocated ones). This is important for downgrading to a 2057 * qcow2 version which doesn't yet support metadata zero clusters. 2058 */ 2059 int qcow2_expand_zero_clusters(BlockDriverState *bs, 2060 BlockDriverAmendStatusCB *status_cb, 2061 void *cb_opaque) 2062 { 2063 BDRVQcow2State *s = bs->opaque; 2064 uint64_t *l1_table = NULL; 2065 int64_t l1_entries = 0, visited_l1_entries = 0; 2066 int ret; 2067 int i, j; 2068 2069 if (status_cb) { 2070 l1_entries = s->l1_size; 2071 for (i = 0; i < s->nb_snapshots; i++) { 2072 l1_entries += s->snapshots[i].l1_size; 2073 } 2074 } 2075 2076 ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size, 2077 &visited_l1_entries, l1_entries, 2078 status_cb, cb_opaque); 2079 if (ret < 0) { 2080 goto fail; 2081 } 2082 2083 /* Inactive L1 tables may point to active L2 tables - therefore it is 2084 * necessary to flush the L2 table cache before trying to access the L2 2085 * tables pointed to by inactive L1 entries (else we might try to expand 2086 * zero clusters that have already been expanded); furthermore, it is also 2087 * necessary to empty the L2 table cache, since it may contain tables which 2088 * are now going to be modified directly on disk, bypassing the cache. 2089 * qcow2_cache_empty() does both for us. */ 2090 ret = qcow2_cache_empty(bs, s->l2_table_cache); 2091 if (ret < 0) { 2092 goto fail; 2093 } 2094 2095 for (i = 0; i < s->nb_snapshots; i++) { 2096 int l1_size2; 2097 uint64_t *new_l1_table; 2098 Error *local_err = NULL; 2099 2100 ret = qcow2_validate_table(bs, s->snapshots[i].l1_table_offset, 2101 s->snapshots[i].l1_size, sizeof(uint64_t), 2102 QCOW_MAX_L1_SIZE, "Snapshot L1 table", 2103 &local_err); 2104 if (ret < 0) { 2105 error_report_err(local_err); 2106 goto fail; 2107 } 2108 2109 l1_size2 = s->snapshots[i].l1_size * sizeof(uint64_t); 2110 new_l1_table = g_try_realloc(l1_table, l1_size2); 2111 2112 if (!new_l1_table) { 2113 ret = -ENOMEM; 2114 goto fail; 2115 } 2116 2117 l1_table = new_l1_table; 2118 2119 ret = bdrv_pread(bs->file, s->snapshots[i].l1_table_offset, 2120 l1_table, l1_size2); 2121 if (ret < 0) { 2122 goto fail; 2123 } 2124 2125 for (j = 0; j < s->snapshots[i].l1_size; j++) { 2126 be64_to_cpus(&l1_table[j]); 2127 } 2128 2129 ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size, 2130 &visited_l1_entries, l1_entries, 2131 status_cb, cb_opaque); 2132 if (ret < 0) { 2133 goto fail; 2134 } 2135 } 2136 2137 ret = 0; 2138 2139 fail: 2140 g_free(l1_table); 2141 return ret; 2142 } 2143