/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <zlib.h>

#include "qapi/error.h"
#include "qcow2.h"
#include "qemu/bswap.h"
#include "trace.h"

int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size, i, ret;

    if (exact_size >= s->l1_size) {
        return 0;
    }

    new_l1_size = exact_size;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
    ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset +
                                       new_l1_size * sizeof(uint64_t),
                             (s->l1_size - new_l1_size) * sizeof(uint64_t), 0);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_flush(bs->file->bs);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
    for (i = s->l1_size - 1; i > new_l1_size - 1; i--) {
        if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) {
            continue;
        }
        qcow2_free_clusters(bs, s->l1_table[i] & L1E_OFFSET_MASK,
                            s->cluster_size, QCOW2_DISCARD_ALWAYS);
        s->l1_table[i] = 0;
    }
    return 0;

fail:
    /*
     * If the write in the l1_table failed, the image may contain a partially
     * overwritten l1_table. In this case it would be better to clear the
     * l1_table in memory to avoid possible image corruption.
     */
    memset(s->l1_table + new_l1_size, 0,
           (s->l1_size - new_l1_size) * sizeof(uint64_t));
    return ret;
}

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = DIV_ROUND_UP(new_l1_size * 3, 2);
        }
    }

    QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
    if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs,
                                       ROUND_UP(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, ROUND_UP(new_l1_size2, 512));

    if (s->l1_size) {
        memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
    }

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2, false);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
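    /*
     * Note: in QCowHeader the 32-bit l1_size field is immediately followed
     * by the 64-bit l1_table_offset field, so the 12-byte buffer below
     * updates both fields with a single synchronous write, activating the
     * new table in one step.
     */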
    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    stl_be_p(data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * @bs: The BlockDriverState
 * @offset: A guest offset, used to calculate what slice of the L2
 *          table to load.
 * @l2_offset: Offset to the L2 table in the image file.
 * @l2_slice: Location to store the pointer to the L2 slice.
 *
 * Loads a L2 slice into memory (L2 slices are the parts of L2 tables
 * that are loaded by the qcow2 cache). If the slice is in the cache,
 * the cache is used; otherwise the L2 slice is loaded from the image
 * file.
 */
static int l2_load(BlockDriverState *bs, uint64_t offset,
                   uint64_t l2_offset, uint64_t **l2_slice)
{
    BDRVQcow2State *s = bs->opaque;
    int start_of_slice = sizeof(uint64_t) *
        (offset_to_l2_index(s, offset) - offset_to_l2_slice_index(s, offset));

    return qcow2_cache_get(bs, s->l2_table_cache, l2_offset + start_of_slice,
                           (void **)l2_slice);
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want a sub-sector bdrv_pwrite to trigger a
 * read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++)
    {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf), false);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file,
                           s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocate a new L2 table in the image file. If the L1 entry at l1_index is
 * already in use (i.e. we are doing a copy on write for the L2 table), copy
 * the contents of the old L2 table into the newly allocated one. Otherwise
 * the new table is initialized with zeros.
 */
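/*
 * Note: the new table is not written in one go; it is populated slice by
 * slice through the L2 table cache. For example, with 64 KiB clusters and
 * an l2-cache-entry-size of 4 KiB, the loop below processes
 * 64 KiB / 4 KiB = 16 slices.
 */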
static int l2_allocate(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    /* The offset must fit in the offset field of the L1 table entry */
    assert((l2_offset & L1E_OFFSET_MASK) == l2_offset);

    /* If we're allocating the table at offset 0 then something is wrong */
    if (l2_offset == 0) {
        qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid "
                                "allocation of L2 table at offset 0");
        ret = -EIO;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
    n_slices = s->cluster_size / slice_size2;

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    for (slice = 0; slice < n_slices; slice++) {
        ret = qcow2_cache_get_empty(bs, s->l2_table_cache,
                                    l2_offset + slice * slice_size2,
                                    (void **) &l2_slice);
        if (ret < 0) {
            goto fail;
        }

        if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
            /* if there was no old l2 table, clear the new slice */
            memset(l2_slice, 0, slice_size2);
        } else {
            uint64_t *old_slice;
            uint64_t old_l2_slice_offset =
                (old_l2_offset & L1E_OFFSET_MASK) + slice * slice_size2;

            /* if there was an old l2 table, read a slice from the disk */
            BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
            ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_slice_offset,
                                  (void **) &old_slice);
            if (ret < 0) {
                goto fail;
            }

            memcpy(l2_slice, old_slice, slice_size2);

            qcow2_cache_put(s->l2_table_cache, (void **) &old_slice);
        }

        /* write the l2 slice to the file */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

        trace_qcow2_l2_allocate_write_l2(bs, l1_index);
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }

    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_slice != NULL) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}

/*
 * Checks how many clusters in a given L2 slice are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster which may require different handling)
 */
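/*
 * Illustrative example: with 64 KiB clusters, L2 entries pointing at host
 * offsets 0x50000, 0x60000 and 0x80000 yield a count of 2: the second entry
 * equals offset + 1 * cluster_size, but the third would have to be 0x70000
 * to be contiguous, so the scan stops there.
 */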
static int count_contiguous_clusters(BlockDriverState *bs, int nb_clusters,
        int cluster_size, uint64_t *l2_slice, uint64_t stop_flags)
{
    int i;
    QCow2ClusterType first_cluster_type;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_slice[0]);
    uint64_t offset = first_entry & mask;

    first_cluster_type = qcow2_get_cluster_type(bs, first_entry);
    if (first_cluster_type == QCOW2_CLUSTER_UNALLOCATED) {
        return 0;
    }

    /* must be allocated */
    assert(first_cluster_type == QCOW2_CLUSTER_NORMAL ||
           first_cluster_type == QCOW2_CLUSTER_ZERO_ALLOC);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_slice[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}

/*
 * Checks how many consecutive unallocated clusters in a given L2
 * slice have the same cluster type.
 */
static int count_contiguous_clusters_unallocated(BlockDriverState *bs,
                                                 int nb_clusters,
                                                 uint64_t *l2_slice,
                                                 QCow2ClusterType wanted_type)
{
    int i;

    assert(wanted_type == QCOW2_CLUSTER_ZERO_PLAIN ||
           wanted_type == QCOW2_CLUSTER_UNALLOCATED);
    for (i = 0; i < nb_clusters; i++) {
        uint64_t entry = be64_to_cpu(l2_slice[i]);
        QCow2ClusterType type = qcow2_get_cluster_type(bs, entry);

        if (type != wanted_type) {
            break;
        }
    }

    return i;
}

static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
                                            uint64_t src_cluster_offset,
                                            unsigned offset_in_cluster,
                                            QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    /* Call .bdrv_co_preadv_part() directly instead of using the public
     * block-layer interface. This avoids double I/O throttling and request
     * tracking, which can lead to deadlock when block layer copy-on-read is
     * enabled.
     */
    ret = bs->drv->bdrv_co_preadv_part(bs,
                                       src_cluster_offset + offset_in_cluster,
                                       qiov->size, qiov, 0, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
                                             uint64_t cluster_offset,
                                             unsigned offset_in_cluster,
                                             QEMUIOVector *qiov)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + offset_in_cluster, qiov->size, true);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(s->data_file, cluster_offset + offset_in_cluster,
                          qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}


/*
 * get_cluster_offset
 *
 * For a given offset of the virtual disk, find the cluster type and offset in
 * the qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *bytes is the maximum number of contiguous bytes starting at
 * offset that we are interested in.
 *
 * On exit, *bytes is the number of bytes starting at offset that have the same
 * cluster type and (if applicable) are stored contiguously in the image file.
 * Compressed clusters are always returned one by one.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
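/*
 * Worked example (illustrative, assuming 64 KiB clusters): a caller asking
 * about offset = 0x11000 with *bytes = 0x20000 covers guest clusters 1-3
 * (offset_in_cluster = 0x1000, bytes_needed = 0x21000). If only clusters 1
 * and 2 are contiguous NORMAL clusters, then c = 2, bytes_available =
 * 0x20000, and the function returns with *bytes = 0x20000 - 0x1000 = 0x1f000.
 */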
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             unsigned int *bytes, uint64_t *cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_slice;
    int c;
    unsigned int offset_in_cluster;
    uint64_t bytes_available, bytes_needed, nb_clusters;
    QCow2ClusterType type;
    int ret;

    offset_in_cluster = offset_into_cluster(s, offset);
    bytes_needed = (uint64_t) *bytes + offset_in_cluster;

    /* compute how many bytes there are between the start of the cluster
     * containing offset and the end of the l2 slice that contains
     * the entry pointing to it */
    bytes_available =
        ((uint64_t) (s->l2_slice_size - offset_to_l2_slice_index(s, offset)))
        << s->cluster_bits;

    if (bytes_needed > bytes_available) {
        bytes_needed = bytes_available;
    }

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        type = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 slice in memory */

    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);
    *cluster_offset = be64_to_cpu(l2_slice[l2_index]);

    nb_clusters = size_to_clusters(s, bytes_needed);
    /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
     * integers; the minimum cluster size is 512, so this assertion is always
     * true */
    assert(nb_clusters <= INT_MAX);

    type = qcow2_get_cluster_type(bs, *cluster_offset);
    if (s->qcow_version < 3 && (type == QCOW2_CLUSTER_ZERO_PLAIN ||
                                type == QCOW2_CLUSTER_ZERO_ALLOC)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                " in pre-v3 image (L2 offset: %#" PRIx64
                                ", L2 index: %#x)", l2_offset, l2_index);
        ret = -EIO;
        goto fail;
    }
    switch (type) {
    case QCOW2_CLUSTER_COMPRESSED:
        if (has_data_file(bs)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Compressed cluster "
                                    "entry found in image with external data "
                                    "file (L2 offset: %#" PRIx64 ", L2 index: "
                                    "%#x)", l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO_PLAIN:
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters? */
        c = count_contiguous_clusters_unallocated(bs, nb_clusters,
                                                  &l2_slice[l2_index], type);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_ZERO_ALLOC:
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters? */
        c = count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "Cluster allocation offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        if (has_data_file(bs) && *cluster_offset != offset - offset_in_cluster)
        {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "External data file host cluster offset %#"
                                    PRIx64 " does not match guest cluster "
                                    "offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    offset - offset_in_cluster, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    bytes_available = (int64_t)c * s->cluster_size;

out:
    if (bytes_available > bytes_needed) {
        bytes_available = bytes_needed;
    }

    /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
     * subtracting offset_in_cluster will therefore definitely yield something
     * not exceeding UINT_MAX */
    assert(bytes_available - offset_in_cluster <= UINT_MAX);
    *bytes = bytes_available - offset_in_cluster;

    return type;

fail:
    qcow2_cache_put(s->l2_table_cache, (void **)&l2_slice);
    return ret;
}

/*
 * get_cluster_table
 *
 * For a given disk offset, load (and allocate if needed) the appropriate
 * slice of its L2 table.
 *
 * The cluster index in the L2 slice is given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_slice,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_slice = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    if (!(s->l1_table[l1_index] & QCOW_OFLAG_COPIED)) {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }

        /* Get the offset of the newly-allocated l2 table */
        l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
        assert(offset_into_cluster(s, l2_offset) == 0);
    }

    /* load the l2 slice in memory */
    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = offset_to_l2_slice_index(s, offset);

    *new_l2_slice = l2_slice;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset on the virtual disk, allocate a new compressed cluster
 * and put the host offset of the cluster into *host_offset. If a cluster is
 * already allocated at the offset, return an error.
 *
 * Return 0 on success and -errno in error cases
 */
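/*
 * Illustrative note: the size of a compressed cluster is not stored in
 * bytes. nb_csectors below is the index difference between the first and
 * last 512-byte sector touched by the compressed data; e.g. 3000 bytes
 * starting at host offset 0x5ff00 first touch sector 767 and last touch
 * sector 773, so nb_csectors = 6.
 */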
int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                          uint64_t offset,
                                          int compressed_size,
                                          uint64_t *host_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_slice;
    int64_t cluster_offset;
    int nb_csectors;

    if (has_data_file(bs)) {
        return 0;
    }

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_slice[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return -EIO;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return cluster_offset;
    }

    nb_csectors =
        (cluster_offset + compressed_size - 1) / QCOW2_COMPRESSED_SECTOR_SIZE -
        (cluster_offset / QCOW2_COMPRESSED_SECTOR_SIZE);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
    l2_slice[l2_index] = cpu_to_be64(cluster_offset);
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    *host_offset = cluster_offset & s->cluster_offset_mask;
    return 0;
}
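/*
 * Illustrative sketch of the COW layout handled below: a write that does not
 * cover whole clusters leaves two regions that must be copied from the old
 * location:
 *
 *   |<- cow_start ->|<------ guest data ------>|<- cow_end ->|
 *   cluster start   write start      write end               cluster end
 */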
static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2COWRegion *start = &m->cow_start;
    Qcow2COWRegion *end = &m->cow_end;
    unsigned buffer_size;
    unsigned data_bytes = end->offset - (start->offset + start->nb_bytes);
    bool merge_reads;
    uint8_t *start_buffer, *end_buffer;
    QEMUIOVector qiov;
    int ret;

    assert(start->nb_bytes <= UINT_MAX - end->nb_bytes);
    assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes);
    assert(start->offset + start->nb_bytes <= end->offset);

    if ((start->nb_bytes == 0 && end->nb_bytes == 0) || m->skip_cow) {
        return 0;
    }

    /* If we have to read both the start and end COW regions and the
     * middle region is not too large then perform just one read
     * operation */
    merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384;
    if (merge_reads) {
        buffer_size = start->nb_bytes + data_bytes + end->nb_bytes;
    } else {
        /* If we have to do two reads, add some padding in the middle
         * if necessary to make sure that the end region is optimally
         * aligned. */
        size_t align = bdrv_opt_mem_align(bs);
        assert(align > 0 && align <= UINT_MAX);
        assert(QEMU_ALIGN_UP(start->nb_bytes, align) <=
               UINT_MAX - end->nb_bytes);
        buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes;
    }

    /* Reserve a buffer large enough to store all the data that we're
     * going to read */
    start_buffer = qemu_try_blockalign(bs, buffer_size);
    if (start_buffer == NULL) {
        return -ENOMEM;
    }
    /* The part of the buffer where the end region is located */
    end_buffer = start_buffer + buffer_size - end->nb_bytes;

    qemu_iovec_init(&qiov, 2 + (m->data_qiov ?
                                qemu_iovec_subvec_niov(m->data_qiov,
                                                       m->data_qiov_offset,
                                                       data_bytes)
                                : 0));

    qemu_co_mutex_unlock(&s->lock);
    /* First we read the existing data from both COW regions. We
     * either read the whole region in one go, or the start and end
     * regions separately. */
    if (merge_reads) {
        qemu_iovec_add(&qiov, start_buffer, buffer_size);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
    } else {
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
    }
    if (ret < 0) {
        goto fail;
    }

    /* Encrypt the data if necessary before writing it */
    if (bs->encrypted) {
        ret = qcow2_co_encrypt(bs,
                               m->alloc_offset + start->offset,
                               m->offset + start->offset,
                               start_buffer, start->nb_bytes);
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_co_encrypt(bs,
                               m->alloc_offset + end->offset,
                               m->offset + end->offset,
                               end_buffer, end->nb_bytes);
        if (ret < 0) {
            goto fail;
        }
    }
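    /*
     * Note: if the caller attached the guest data via m->data_qiov, the
     * start region, the guest data and the end region are stitched into a
     * single iovec below, so the whole cluster range reaches the disk in one
     * write request; otherwise the two COW regions are written separately.
     */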
    /* And now we can write everything. If we have the guest data we
     * can write everything in one single operation */
    if (m->data_qiov) {
        qemu_iovec_reset(&qiov);
        if (start->nb_bytes) {
            qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        }
        qemu_iovec_concat(&qiov, m->data_qiov, m->data_qiov_offset, data_bytes);
        if (end->nb_bytes) {
            qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        }
        /* NOTE: we have a write_aio blkdebug event here followed by
         * a cow_write one in do_perform_cow_write(), but there's only
         * one single I/O operation */
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
    } else {
        /* If there's no guest data then write both COW regions separately */
        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
    }

fail:
    qemu_co_mutex_lock(&s->lock);

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (ret == 0) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qemu_vfree(start_buffer);
    qemu_iovec_destroy(&qiov);
    return ret;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_slice;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_slice, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);

    assert(l2_index + m->nb_clusters <= s->l2_slice_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes the data
         * concurrently. The first one to complete updates the L2 table with a
         * pointer to its cluster; the second one has to do RMW (which is done
         * above by perform_cow()), update the L2 table with its cluster
         * pointer and free the old cluster. This is what this loop does. */
        if (l2_slice[l2_index + i] != 0) {
            old_cluster[j++] = l2_slice[l2_index + i];
        }

        l2_slice[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }


    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (!m->keep_old_clusters && j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/**
 * Frees the allocated clusters because the request failed and they won't
 * actually be linked.
 */
void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    qcow2_free_clusters(bs, m->alloc_offset, m->nb_clusters << s->cluster_bits,
                        QCOW2_DISCARD_NEVER);
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must be copied from the backing file)
 */
static int count_cow_clusters(BlockDriverState *bs, int nb_clusters,
                              uint64_t *l2_slice, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_slice[l2_index + i]);
        QCow2ClusterType cluster_type = qcow2_get_cluster_type(bs, l2_entry);

        switch(cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO_PLAIN:
        case QCOW2_CLUSTER_ZERO_ALLOC:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
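/*
 * Illustrative example: if an in-flight allocation covers guest bytes
 * [0x30000, 0x50000) and a new request asks for [0x20000, 0x60000), the new
 * request is shortened to [0x20000, 0x30000) and handled now. A request
 * starting at 0x40000 would instead wait on the dependent_requests queue
 * (when no L2Meta has been gathered yet) and return -EAGAIN.
 */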
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
                               uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}

/*
 * Checks how many clusters that are already allocated and don't require a
 * copy on write are present at the given guest_offset (up to *bytes). If
 * *host_offset is not INV_OFFSET, only physically contiguous clusters
 * beginning at this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *   -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
                         uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_slice;
    uint64_t nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == INV_OFFSET || offset_into_cluster(s, guest_offset)
                                      == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 slice
     * boundaries to keep things simple.
     */
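    /*
     * Illustrative example: with an l2-cache-entry-size of 4 KiB a slice
     * holds 512 entries; if guest_offset maps to slice index 510 and the
     * request spans 5 clusters, nb_clusters is clamped to 2 so that the
     * lookup never crosses the slice boundary.
     */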
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_slice_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_slice[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(bs, cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#llx unaligned (guest offset: %#" PRIx64
                                    ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != INV_OFFSET && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is not INV_OFFSET, it specifies the offset in the image file
 * at which the new clusters must start. *nb_clusters can be 0 on return in
 * this case if the cluster at host_offset is already in use. If *host_offset
 * is INV_OFFSET, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
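/*
 * Note: there are two allocation modes below. Without a required host
 * offset, qcow2_alloc_clusters() may place the new clusters anywhere in the
 * image file; with a required offset, qcow2_alloc_clusters_at() may return
 * fewer clusters than requested if the following clusters are already in use.
 */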
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
                                   uint64_t *host_offset, uint64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    if (has_data_file(bs)) {
        assert(*host_offset == INV_OFFSET ||
               *host_offset == start_of_cluster(s, guest_offset));
        *host_offset = start_of_cluster(s, guest_offset);
        return 0;
    }

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == INV_OFFSET) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}

/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is not INV_OFFSET, clusters are only
 * allocated if the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *   -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
                        uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t *l2_slice;
    uint64_t entry;
    uint64_t nb_clusters;
    int ret;
    bool keep_old_clusters = false;

    uint64_t alloc_cluster_offset = INV_OFFSET;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 slice
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_slice_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_slice[l2_index]);
    nb_clusters = count_cow_clusters(bs, nb_clusters, l2_slice, l2_index);

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    if (qcow2_get_cluster_type(bs, entry) == QCOW2_CLUSTER_ZERO_ALLOC &&
        (entry & QCOW_OFLAG_COPIED) &&
        (*host_offset == INV_OFFSET ||
         start_of_cluster(s, *host_offset) == (entry & L2E_OFFSET_MASK)))
    {
        int preallocated_nb_clusters;

        if (offset_into_cluster(s, entry & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Preallocated zero "
                                    "cluster offset %#llx unaligned (guest "
                                    "offset: %#" PRIx64 ")",
                                    entry & L2E_OFFSET_MASK, guest_offset);
            ret = -EIO;
            goto fail;
        }

        /* Try to reuse preallocated zero clusters; contiguous normal clusters
         * would be fine, too, but count_cow_clusters() above has already
         * limited nb_clusters to a range of COW clusters */
        preallocated_nb_clusters =
            count_contiguous_clusters(bs, nb_clusters, s->cluster_size,
                                      &l2_slice[l2_index], QCOW_OFLAG_COPIED);
        assert(preallocated_nb_clusters > 0);

        nb_clusters = preallocated_nb_clusters;
        alloc_cluster_offset = entry & L2E_OFFSET_MASK;

        /* We want to reuse these clusters, so qcow2_alloc_cluster_link_l2()
         * should not free them. */
        keep_old_clusters = true;
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    if (alloc_cluster_offset == INV_OFFSET) {
        /* Allocate, if necessary at a given offset in the image file */
        alloc_cluster_offset = *host_offset == INV_OFFSET ? INV_OFFSET :
                               start_of_cluster(s, *host_offset);
        ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                      &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        /* Can't extend contiguous allocation */
        if (nb_clusters == 0) {
            *bytes = 0;
            return 0;
        }

        assert(alloc_cluster_offset != INV_OFFSET);
    }
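    /*
     * Worked example (illustrative, 64 KiB clusters): guest_offset = 0x11000
     * and *bytes = 0x15000 with two clusters allocated gives
     * requested_bytes = 0x16000, avail_bytes = 0x20000 and nb_bytes =
     * 0x16000; cow_start covers [0, 0x1000) and cow_end covers
     * [0x16000, 0x20000) of the allocation.
     */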
    /*
     * Save info needed for meta data update.
     *
     * requested_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the (possibly already
     * shortened) write request.
     *
     * avail_bytes: Number of bytes from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_bytes: The number of bytes from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
    int avail_bytes = MIN(INT_MAX, nb_clusters << s->cluster_bits);
    int nb_bytes = MIN(requested_bytes, avail_bytes);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,

        .keep_old_clusters = keep_old_clusters,

        .cow_start = {
            .offset     = 0,
            .nb_bytes   = offset_into_cluster(s, guest_offset),
        },
        .cow_end = {
            .offset     = nb_bytes,
            .nb_bytes   = avail_bytes - nb_bytes,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in the
 * qcow2 file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
                               unsigned int *bytes, uint64_t *host_offset,
                               QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes);

again:
    start = offset;
    remaining = *bytes;
    cluster_offset = INV_OFFSET;
    *host_offset = INV_OFFSET;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (*host_offset == INV_OFFSET && cluster_offset != INV_OFFSET) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start += cur_bytes;
        remaining -= cur_bytes;

        if (cluster_offset != INV_OFFSET) {
            cluster_offset += cur_bytes;
        }

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests.
         *         Yield and restart the search for contiguous clusters
         *         (the situation could have changed while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight
         *         allocation, set cluster_offset to write to the same cluster
         *         and set up the right synchronisation between the in-flight
         *         request and the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *bytes -= remaining;
    assert(*bytes > 0);
    assert(*host_offset != INV_OFFSET);

    return 0;
}

/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 slice) and returns the number of discarded
 * clusters.
 */
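/*
 * Note: the effect of a discard depends on full_discard. With
 * full_discard == false on a v3 image, the L2 entry is replaced by a zero
 * cluster so the area keeps reading back as zeroes; with full_discard ==
 * true the entry is cleared entirely, so reads fall through to the backing
 * file if there is one.
 */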
static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset,
                               uint64_t nb_clusters,
                               enum qcow2_discard_type type, bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_slice;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 slice */
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry;

        old_l2_entry = be64_to_cpu(l2_slice[l2_index + i]);

        /*
         * If full_discard is false, make sure that a discarded area reads back
         * as zeroes for v3 images (we cannot do it for v2 without actually
         * writing a zero-filled buffer). We can skip the operation if the
         * cluster is already marked as zero, or if it's unallocated and we
         * don't have a backing file.
         *
         * TODO We might want to use bdrv_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         *
         * If full_discard is true, the sector should not read back as zeroes,
         * but rather fall through to the backing file.
         */
        switch (qcow2_get_cluster_type(bs, old_l2_entry)) {
        case QCOW2_CLUSTER_UNALLOCATED:
            if (full_discard || !bs->backing) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_PLAIN:
            if (!full_discard) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO_ALLOC:
        case QCOW2_CLUSTER_NORMAL:
        case QCOW2_CLUSTER_COMPRESSED:
            break;

        default:
            abort();
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        if (!full_discard && s->qcow_version >= 3) {
            l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
        } else {
            l2_slice[l2_index + i] = cpu_to_be64(0);
        }

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    return nb_clusters;
}

int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, enum qcow2_discard_type type,
                          bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    /* Each L2 slice is handled by its own loop iteration */
    while (nb_clusters > 0) {
        cleared = discard_in_l2_slice(bs, offset, nb_clusters, type,
                                      full_discard);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 slice) and returns the number of zeroed
 * clusters.
 */
static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
                            uint64_t nb_clusters, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_slice;
    int l2_index;
    int ret;
    int i;
    bool unmap = !!(flags & BDRV_REQ_MAY_UNMAP);

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 slice */
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;
        QCow2ClusterType cluster_type;

        old_offset = be64_to_cpu(l2_slice[l2_index + i]);

        /*
         * Minimize L2 changes if the cluster already reads back as
         * zeroes with correct allocation.
         */
        cluster_type = qcow2_get_cluster_type(bs, old_offset);
        if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN ||
            (cluster_type == QCOW2_CLUSTER_ZERO_ALLOC && !unmap)) {
            continue;
        }

        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        if (cluster_type == QCOW2_CLUSTER_COMPRESSED || unmap) {
            l2_slice[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_slice[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    return nb_clusters;
}

int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* If we have to stay in sync with an external data file, zero out
     * s->data_file first. */
    if (data_file_is_raw(bs)) {
        assert(has_data_file(bs));
        ret = bdrv_co_pwrite_zeroes(s->data_file, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
    }

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 slice is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        cleared = zero_in_l2_slice(bs, offset, nb_clusters, flags);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * l1_entries and *visited_l1_entries are used to keep track of progress for
 * status_cb(). l1_entries contains the total number of L1 entries and
 * *visited_l1_entries counts all visited L1 entries.
 */
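/*
 * Note: this function serves both the active L1 table and inactive
 * (snapshot) L1 tables. Active L2 slices go through the L2 table cache;
 * inactive L2 tables are read into a temporary buffer and, if modified,
 * written back directly after an overlap check.
 */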
int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* If we have to stay in sync with an external data file, zero out
     * s->data_file first. */
    if (data_file_is_raw(bs)) {
        assert(has_data_file(bs));
        ret = bdrv_co_pwrite_zeroes(s->data_file, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
    }

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 slice is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        cleared = zero_in_l2_slice(bs, offset, nb_clusters, flags);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * l1_entries and *visited_l1_entries are used to keep track of progress for
 * status_cb(). l1_entries contains the total number of L1 entries and
 * *visited_l1_entries counts all visited L1 entries.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, int64_t *visited_l1_entries,
                                      int64_t l1_entries,
                                      BlockDriverAmendStatusCB *status_cb,
                                      void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int ret;
    int i, j;

    slice_size2 = s->l2_slice_size * sizeof(uint64_t);
    n_slices = s->cluster_size / slice_size2;

    if (!is_active_l1) {
        /* inactive L2 tables are read from disk, so they need a local
         * buffer to be loaded into */
        l2_slice = qemu_try_blockalign(bs->file->bs, slice_size2);
        if (l2_slice == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        uint64_t l2_refcount;

        if (!l2_offset) {
            /* unallocated */
            (*visited_l1_entries)++;
            if (status_cb) {
                status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
            }
            continue;
        }

        if (offset_into_cluster(s, l2_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                    PRIx64 " unaligned (L1 index: %#x)",
                                    l2_offset, i);
            ret = -EIO;
            goto fail;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &l2_refcount);
        if (ret < 0) {
            goto fail;
        }

        for (slice = 0; slice < n_slices; slice++) {
            uint64_t slice_offset = l2_offset + slice * slice_size2;
            bool l2_dirty = false;
            if (is_active_l1) {
                /* get active L2 tables from cache */
                ret = qcow2_cache_get(bs, s->l2_table_cache, slice_offset,
                                      (void **)&l2_slice);
            } else {
                /* load inactive L2 tables from disk */
                ret = bdrv_pread(bs->file, slice_offset, l2_slice, slice_size2);
            }
            if (ret < 0) {
                goto fail;
            }

            for (j = 0; j < s->l2_slice_size; j++) {
                uint64_t l2_entry = be64_to_cpu(l2_slice[j]);
                int64_t offset = l2_entry & L2E_OFFSET_MASK;
                QCow2ClusterType cluster_type =
                    qcow2_get_cluster_type(bs, l2_entry);

                if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN &&
                    cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) {
                    continue;
                }

                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    if (!bs->backing) {
                        /* not backed; therefore we can simply deallocate the
                         * cluster */
                        l2_slice[j] = 0;
                        l2_dirty = true;
                        continue;
                    }

                    offset = qcow2_alloc_clusters(bs, s->cluster_size);
                    if (offset < 0) {
                        ret = offset;
                        goto fail;
                    }

                    if (l2_refcount > 1) {
                        /* For shared L2 tables, set the refcount accordingly
                         * (it is already 1 and needs to be l2_refcount) */
                        ret = qcow2_update_cluster_refcount(
                                bs, offset >> s->cluster_bits,
                                refcount_diff(1, l2_refcount), false,
                                QCOW2_DISCARD_OTHER);
                        if (ret < 0) {
                            qcow2_free_clusters(bs, offset, s->cluster_size,
                                                QCOW2_DISCARD_OTHER);
                            goto fail;
                        }
                    }
                }

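                /*
                 * At this point, offset is either the preexisting host
                 * cluster (ZERO_ALLOC) or the one freshly allocated above
                 * (formerly ZERO_PLAIN). Before converting the entry into a
                 * normal cluster, validate the offset and make sure the
                 * cluster really reads back as zeroes on disk.
                 */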
                if (offset_into_cluster(s, offset)) {
                    int l2_index = slice * s->l2_slice_size + j;
                    qcow2_signal_corruption(
                        bs, true, -1, -1,
                        "Cluster allocation offset "
                        "%#" PRIx64 " unaligned (L2 offset: %#"
                        PRIx64 ", L2 index: %#x)", offset,
                        l2_offset, l2_index);
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    ret = -EIO;
                    goto fail;
                }

                ret = qcow2_pre_write_overlap_check(bs, 0, offset,
                                                    s->cluster_size, true);
                if (ret < 0) {
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    goto fail;
                }

                ret = bdrv_pwrite_zeroes(s->data_file, offset,
                                         s->cluster_size, 0);
                if (ret < 0) {
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    goto fail;
                }

                if (l2_refcount == 1) {
                    l2_slice[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
                } else {
                    l2_slice[j] = cpu_to_be64(offset);
                }
                l2_dirty = true;
            }

            if (is_active_l1) {
                if (l2_dirty) {
                    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
                    qcow2_cache_depends_on_flush(s->l2_table_cache);
                }
                qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
            } else {
                if (l2_dirty) {
                    ret = qcow2_pre_write_overlap_check(
                        bs, QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2,
                        slice_offset, slice_size2, false);
                    if (ret < 0) {
                        goto fail;
                    }

                    ret = bdrv_pwrite(bs->file, slice_offset,
                                      l2_slice, slice_size2);
                    if (ret < 0) {
                        goto fail;
                    }
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
        }
    }

    ret = 0;

fail:
    if (l2_slice) {
        if (!is_active_l1) {
            qemu_vfree(l2_slice);
        } else {
            qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        }
    }
    return ret;
}

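/*
 * Illustrative sketch, not part of the original file: a minimal progress
 * callback matching the BlockDriverAmendStatusCB signature accepted by
 * qcow2_expand_zero_clusters() below. Real callers (e.g. the image amend
 * code) supply their own; this hypothetical one merely reports the ratio
 * of visited L1 entries:
 *
 *     static void example_status_cb(BlockDriverState *bs, int64_t offset,
 *                                   int64_t total_work_size, void *opaque)
 *     {
 *         fprintf(stderr, "visited %" PRId64 "/%" PRId64 " L1 entries\n",
 *                 offset, total_work_size);
 *     }
 *
 *     ret = qcow2_expand_zero_clusters(bs, example_status_cb, NULL);
 */
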
/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version that does not support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs,
                               BlockDriverAmendStatusCB *status_cb,
                               void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table = NULL;
    int64_t l1_entries = 0, visited_l1_entries = 0;
    int ret;
    int i, j;

    if (status_cb) {
        l1_entries = s->l1_size;
        for (i = 0; i < s->nb_snapshots; i++) {
            l1_entries += s->snapshots[i].l1_size;
        }
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &visited_l1_entries, l1_entries,
                                     status_cb, cb_opaque);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables; therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded). Furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us.
     */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_size2;
        uint64_t *new_l1_table;
        Error *local_err = NULL;

        ret = qcow2_validate_table(bs, s->snapshots[i].l1_table_offset,
                                   s->snapshots[i].l1_size, sizeof(uint64_t),
                                   QCOW_MAX_L1_SIZE, "Snapshot L1 table",
                                   &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            goto fail;
        }

        l1_size2 = s->snapshots[i].l1_size * sizeof(uint64_t);
        new_l1_table = g_try_realloc(l1_table, l1_size2);

        if (!new_l1_table) {
            ret = -ENOMEM;
            goto fail;
        }

        l1_table = new_l1_table;

        ret = bdrv_pread(bs->file, s->snapshots[i].l1_table_offset,
                         l1_table, l1_size2);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &visited_l1_entries, l1_entries,
                                         status_cb, cb_opaque);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(l1_table);
    return ret;
}
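
/*
 * Illustrative sketch, not part of the original file: zero cluster
 * expansion matters when downgrading an image to qcow2 version 2, which
 * has no zero-cluster support. A downgrade path would roughly do:
 *
 *     if (target_version < 3) {
 *         ret = qcow2_expand_zero_clusters(bs, status_cb, cb_opaque);
 *         if (ret < 0) {
 *             return ret;
 *         }
 *     }
 *
 * where target_version is assumed to be the requested compat level.
 */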