/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <zlib.h>

#include "qapi/error.h"
#include "qcow2.h"
#include "qemu/bswap.h"
#include "trace.h"

int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size, i, ret;

    if (exact_size >= s->l1_size) {
        return 0;
    }

    new_l1_size = exact_size;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE);
    ret = bdrv_pwrite_zeroes(bs->file,
                             s->l1_table_offset + new_l1_size * L1E_SIZE,
                             (s->l1_size - new_l1_size) * L1E_SIZE, 0);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_flush(bs->file->bs);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS);
    for (i = s->l1_size - 1; i > new_l1_size - 1; i--) {
        if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) {
            continue;
        }
        qcow2_free_clusters(bs, s->l1_table[i] & L1E_OFFSET_MASK,
                            s->cluster_size, QCOW2_DISCARD_ALWAYS);
        s->l1_table[i] = 0;
    }
    return 0;

fail:
    /*
     * If the write in the l1_table failed the image may contain a partially
     * overwritten l1_table. In this case it would be better to clear the
     * l1_table in memory to avoid possible image corruption.
     */
    memset(s->l1_table + new_l1_size, 0,
           (s->l1_size - new_l1_size) * L1E_SIZE);
    return ret;
}
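
/*
 * Sizing example, assuming the default 64 KiB cluster size and standard
 * (non-extended) 8-byte L2 entries: one L2 table holds 8192 entries and
 * therefore maps 8192 * 64 KiB = 512 MiB of guest space per L1 entry, so a
 * 1 TiB disk needs 2048 L1 entries, i.e. 16 KiB of L1 data.
 */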

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size) {
        return 0;
    }

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / L1E_SIZE) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = DIV_ROUND_UP(new_l1_size * 3, 2);
        }
    }

    QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
    if (new_l1_size > QCOW_MAX_L1_SIZE / L1E_SIZE) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = L1E_SIZE * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs, new_l1_size2);
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, new_l1_size2);

    if (s->l1_size) {
        memcpy(new_l1_table, s->l1_table, s->l1_size * L1E_SIZE);
    }

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2, false);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    stl_be_p(data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * L1E_SIZE,
                        QCOW2_DISCARD_OTHER);
    return 0;
fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * @bs: The BlockDriverState
 * @offset: A guest offset, used to calculate what slice of the L2
 *          table to load.
 * @l2_offset: Offset to the L2 table in the image file.
 * @l2_slice: Location to store the pointer to the L2 slice.
 *
 * Loads an L2 slice into memory (L2 slices are the parts of L2 tables
 * that are loaded by the qcow2 cache). If the slice is in the cache,
 * the cache is used; otherwise the L2 slice is loaded from the image
 * file.
 */
static int l2_load(BlockDriverState *bs, uint64_t offset,
                   uint64_t l2_offset, uint64_t **l2_slice)
{
    BDRVQcow2State *s = bs->opaque;
    int start_of_slice = l2_entry_size(s) *
        (offset_to_l2_index(s, offset) - offset_to_l2_slice_index(s, offset));

    return qcow2_cache_get(bs, s->l2_table_cache, l2_offset + start_of_slice,
                           (void **)l2_slice);
}
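
/*
 * Slice arithmetic example (illustrative; assumes 64 KiB clusters, 8-byte L2
 * entries and a 4 KiB cache slice, i.e. 512 entries per slice): for a guest
 * offset whose L2 index is 1000, the slice-local index is 1000 % 512 = 488,
 * so start_of_slice = 8 * (1000 - 488) = 4096 and l2_load() fetches the
 * second slice of the table.
 */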

/*
 * Writes an L1 entry to disk (note that depending on the alignment
 * requirements this function may write more than just one entry in
 * order to prevent bdrv_pwrite from performing a read-modify-write)
 */
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    int l1_start_index;
    int i, ret;
    int bufsize = MAX(L1E_SIZE,
                      MIN(bs->file->bs->bl.request_alignment, s->cluster_size));
    int nentries = bufsize / L1E_SIZE;
    g_autofree uint64_t *buf = g_try_new0(uint64_t, nentries);

    if (buf == NULL) {
        return -ENOMEM;
    }

    l1_start_index = QEMU_ALIGN_DOWN(l1_index, nentries);
    for (i = 0; i < MIN(nentries, s->l1_size - l1_start_index); i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + L1E_SIZE * l1_start_index, bufsize, false);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file,
                           s->l1_table_offset + L1E_SIZE * l1_start_index,
                           buf, bufsize);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
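
/*
 * Example for the sizing above (illustrative numbers): if the image file
 * reports a 4 KiB request_alignment and the cluster size is 64 KiB, then
 * bufsize = 4 KiB and nentries = 512, so updating L1 entry 1000 rewrites
 * the aligned group of entries 512..1023 in one aligned write.
 */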

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 */
static int l2_allocate(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */
    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * l2_entry_size(s));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    /* The offset must fit in the offset field of the L1 table entry */
    assert((l2_offset & L1E_OFFSET_MASK) == l2_offset);

    /* If we're allocating the table at offset 0 then something is wrong */
    if (l2_offset == 0) {
        qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid "
                                "allocation of L2 table at offset 0");
        ret = -EIO;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */
    slice_size2 = s->l2_slice_size * l2_entry_size(s);
    n_slices = s->cluster_size / slice_size2;

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    for (slice = 0; slice < n_slices; slice++) {
        ret = qcow2_cache_get_empty(bs, s->l2_table_cache,
                                    l2_offset + slice * slice_size2,
                                    (void **) &l2_slice);
        if (ret < 0) {
            goto fail;
        }

        if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
            /* if there was no old l2 table, clear the new slice */
            memset(l2_slice, 0, slice_size2);
        } else {
            uint64_t *old_slice;
            uint64_t old_l2_slice_offset =
                (old_l2_offset & L1E_OFFSET_MASK) + slice * slice_size2;

            /* if there was an old l2 table, read a slice from the disk */
            BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
            ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_slice_offset,
                                  (void **) &old_slice);
            if (ret < 0) {
                goto fail;
            }

            memcpy(l2_slice, old_slice, slice_size2);

            qcow2_cache_put(s->l2_table_cache, (void **) &old_slice);
        }

        /* write the l2 slice to the file */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

        trace_qcow2_l2_allocate_write_l2(bs, l1_index);
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }

    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_slice != NULL) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * l2_entry_size(s),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}
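
/*
 * Background for the subcluster helpers below (summarized from the qcow2
 * spec): with extended L2 entries each entry is 16 bytes, a 64-bit cluster
 * descriptor followed by a 64-bit bitmap. Bits 0-31 of the bitmap mark
 * subclusters that are allocated, bits 32-63 mark subclusters that read as
 * zeroes; a cluster is always split into 32 subclusters, so a 64 KiB
 * cluster has 2 KiB subclusters.
 */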

/*
 * For a given L2 entry, count the number of contiguous subclusters of
 * the same type starting from @sc_from. Compressed clusters are
 * treated as if they were divided into subclusters of size
 * s->subcluster_size.
 *
 * Return the number of contiguous subclusters and set @type to the
 * subcluster type.
 *
 * If the L2 entry is invalid return -errno and set @type to
 * QCOW2_SUBCLUSTER_INVALID.
 */
static int qcow2_get_subcluster_range_type(BlockDriverState *bs,
                                           uint64_t l2_entry,
                                           uint64_t l2_bitmap,
                                           unsigned sc_from,
                                           QCow2SubclusterType *type)
{
    BDRVQcow2State *s = bs->opaque;
    uint32_t val;

    *type = qcow2_get_subcluster_type(bs, l2_entry, l2_bitmap, sc_from);

    if (*type == QCOW2_SUBCLUSTER_INVALID) {
        return -EINVAL;
    } else if (!has_subclusters(s) || *type == QCOW2_SUBCLUSTER_COMPRESSED) {
        return s->subclusters_per_cluster - sc_from;
    }

    switch (*type) {
    case QCOW2_SUBCLUSTER_NORMAL:
        val = l2_bitmap | QCOW_OFLAG_SUB_ALLOC_RANGE(0, sc_from);
        return cto32(val) - sc_from;

    case QCOW2_SUBCLUSTER_ZERO_PLAIN:
    case QCOW2_SUBCLUSTER_ZERO_ALLOC:
        val = (l2_bitmap | QCOW_OFLAG_SUB_ZERO_RANGE(0, sc_from)) >> 32;
        return cto32(val) - sc_from;

    case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
    case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
        val = ((l2_bitmap >> 32) | l2_bitmap)
            & ~QCOW_OFLAG_SUB_ALLOC_RANGE(0, sc_from);
        return ctz32(val) - sc_from;

    default:
        g_assert_not_reached();
    }
}
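
/*
 * Worked example for the NORMAL case above (hypothetical bitmap): if
 * subclusters 0-7 are allocated (low bits of l2_bitmap = 0xff) and
 * sc_from = 4, then val = 0xff | 0x0f = 0xff, cto32(val) = 8, and the
 * function returns 8 - 4 = 4 contiguous NORMAL subclusters.
 */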

/*
 * Return the number of contiguous subclusters of the exact same type
 * in a given L2 slice, starting from cluster @l2_index, subcluster
 * @sc_index. Allocated subclusters are required to be contiguous in
 * the image file.
 * At most @nb_clusters are checked (note that this means clusters,
 * not subclusters).
 * Compressed clusters are always processed one by one but for the
 * purpose of this count they are treated as if they were divided into
 * subclusters of size s->subcluster_size.
 * On failure return -errno and update @l2_index to point to the
 * invalid entry.
 */
static int count_contiguous_subclusters(BlockDriverState *bs, int nb_clusters,
                                        unsigned sc_index, uint64_t *l2_slice,
                                        unsigned *l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    int i, count = 0;
    bool check_offset = false;
    uint64_t expected_offset = 0;
    QCow2SubclusterType expected_type = QCOW2_SUBCLUSTER_NORMAL, type;

    assert(*l2_index + nb_clusters <= s->l2_slice_size);

    for (i = 0; i < nb_clusters; i++) {
        unsigned first_sc = (i == 0) ? sc_index : 0;
        uint64_t l2_entry = get_l2_entry(s, l2_slice, *l2_index + i);
        uint64_t l2_bitmap = get_l2_bitmap(s, l2_slice, *l2_index + i);
        int ret = qcow2_get_subcluster_range_type(bs, l2_entry, l2_bitmap,
                                                  first_sc, &type);
        if (ret < 0) {
            *l2_index += i; /* Point to the invalid entry */
            return -EIO;
        }
        if (i == 0) {
            if (type == QCOW2_SUBCLUSTER_COMPRESSED) {
                /* Compressed clusters are always processed one by one */
                return ret;
            }
            expected_type = type;
            expected_offset = l2_entry & L2E_OFFSET_MASK;
            check_offset = (type == QCOW2_SUBCLUSTER_NORMAL ||
                            type == QCOW2_SUBCLUSTER_ZERO_ALLOC ||
                            type == QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC);
        } else if (type != expected_type) {
            break;
        } else if (check_offset) {
            expected_offset += s->cluster_size;
            if (expected_offset != (l2_entry & L2E_OFFSET_MASK)) {
                break;
            }
        }
        count += ret;
        /* Stop if there are type changes before the end of the cluster */
        if (first_sc + ret < s->subclusters_per_cluster) {
            break;
        }
    }

    return count;
}

static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
                                            uint64_t src_cluster_offset,
                                            unsigned offset_in_cluster,
                                            QEMUIOVector *qiov)
{
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        return -ENOMEDIUM;
    }

    /*
     * We never deal with requests that don't satisfy
     * bdrv_check_qiov_request(), and aligning requests to clusters never
     * breaks this condition. So, do some assertions before calling
     * bs->drv->bdrv_co_preadv_part() which has int64_t arguments.
     */
    assert(src_cluster_offset <= INT64_MAX);
    assert(src_cluster_offset + offset_in_cluster <= INT64_MAX);
    /* Cast qiov->size to uint64_t to silence a compiler warning on -m32 */
    assert((uint64_t)qiov->size <= INT64_MAX);
    bdrv_check_qiov_request(src_cluster_offset + offset_in_cluster, qiov->size,
                            qiov, 0, &error_abort);
    /*
     * Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_preadv_part(bs,
                                       src_cluster_offset + offset_in_cluster,
                                       qiov->size, qiov, 0, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
                                             uint64_t cluster_offset,
                                             unsigned offset_in_cluster,
                                             QEMUIOVector *qiov)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    if (qiov->size == 0) {
        return 0;
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + offset_in_cluster, qiov->size, true);
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(s->data_file, cluster_offset + offset_in_cluster,
                          qiov->size, qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}


/*
 * get_host_offset
 *
 * For a given offset of the virtual disk find the equivalent host
 * offset in the qcow2 file and store it in *host_offset. Neither
 * offset needs to be aligned to a cluster boundary.
 *
 * If the cluster is unallocated then *host_offset will be 0.
 * If the cluster is compressed then *host_offset will contain the l2 entry.
 *
 * On entry, *bytes is the maximum number of contiguous bytes starting at
 * offset that we are interested in.
 *
 * On exit, *bytes is the number of bytes starting at offset that have the same
 * subcluster type and (if applicable) are stored contiguously in the image
 * file. The subcluster type is stored in *subcluster_type.
 * Compressed clusters are always processed one by one.
 *
 * Returns 0 on success, -errno in error cases.
 */
int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset,
                          unsigned int *bytes, uint64_t *host_offset,
                          QCow2SubclusterType *subcluster_type)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index, sc_index;
    uint64_t l1_index, l2_offset, *l2_slice, l2_entry, l2_bitmap;
    int sc;
    unsigned int offset_in_cluster;
    uint64_t bytes_available, bytes_needed, nb_clusters;
    QCow2SubclusterType type;
    int ret;

    offset_in_cluster = offset_into_cluster(s, offset);
    bytes_needed = (uint64_t) *bytes + offset_in_cluster;

    /* compute how many bytes there are between the start of the cluster
     * containing offset and the end of the l2 slice that contains
     * the entry pointing to it */
    bytes_available =
        ((uint64_t) (s->l2_slice_size - offset_to_l2_slice_index(s, offset)))
        << s->cluster_bits;

    if (bytes_needed > bytes_available) {
        bytes_needed = bytes_available;
    }

    *host_offset = 0;

    /* seek to the l2 offset in the l1 table */
    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        type = QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        type = QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 slice in memory */
    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */
    l2_index = offset_to_l2_slice_index(s, offset);
    sc_index = offset_to_sc_index(s, offset);
    l2_entry = get_l2_entry(s, l2_slice, l2_index);
    l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index);

    nb_clusters = size_to_clusters(s, bytes_needed);
    /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
     * integers; the minimum cluster size is 512, so this assertion is always
     * true */
    assert(nb_clusters <= INT_MAX);

    type = qcow2_get_subcluster_type(bs, l2_entry, l2_bitmap, sc_index);
    if (s->qcow_version < 3 && (type == QCOW2_SUBCLUSTER_ZERO_PLAIN ||
                                type == QCOW2_SUBCLUSTER_ZERO_ALLOC)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                " in pre-v3 image (L2 offset: %#" PRIx64
                                ", L2 index: %#x)", l2_offset, l2_index);
        ret = -EIO;
        goto fail;
    }
    switch (type) {
    case QCOW2_SUBCLUSTER_INVALID:
        break; /* This is handled by count_contiguous_subclusters() below */
    case QCOW2_SUBCLUSTER_COMPRESSED:
        if (has_data_file(bs)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Compressed cluster "
                                    "entry found in image with external data "
                                    "file (L2 offset: %#" PRIx64 ", L2 index: "
                                    "%#x)", l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        *host_offset = l2_entry;
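        /*
         * The caller gets the raw entry rather than a byte offset because a
         * compressed cluster descriptor packs both the host offset and the
         * compressed size into the entry; see the use of s->csize_shift and
         * s->csize_mask in qcow2_alloc_compressed_cluster_offset() below.
         */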
        break;
    case QCOW2_SUBCLUSTER_ZERO_PLAIN:
    case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
        break;
    case QCOW2_SUBCLUSTER_ZERO_ALLOC:
    case QCOW2_SUBCLUSTER_NORMAL:
    case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC: {
        uint64_t host_cluster_offset = l2_entry & L2E_OFFSET_MASK;
        *host_offset = host_cluster_offset + offset_in_cluster;
        if (offset_into_cluster(s, host_cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "Cluster allocation offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", host_cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        if (has_data_file(bs) && *host_offset != offset) {
            qcow2_signal_corruption(bs, true, -1, -1,
                                    "External data file host cluster offset %#"
                                    PRIx64 " does not match guest cluster "
                                    "offset: %#" PRIx64
                                    ", L2 index: %#x", host_cluster_offset,
                                    offset - offset_in_cluster, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    }
    default:
        abort();
    }

    sc = count_contiguous_subclusters(bs, nb_clusters, sc_index,
                                      l2_slice, &l2_index);
    if (sc < 0) {
        qcow2_signal_corruption(bs, true, -1, -1, "Invalid cluster entry found"
                                " (L2 offset: %#" PRIx64 ", L2 index: %#x)",
                                l2_offset, l2_index);
        ret = -EIO;
        goto fail;
    }
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    bytes_available = ((int64_t)sc + sc_index) << s->subcluster_bits;

out:
    if (bytes_available > bytes_needed) {
        bytes_available = bytes_needed;
    }

    /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
     * subtracting offset_in_cluster will therefore definitely yield something
     * not exceeding UINT_MAX */
    assert(bytes_available - offset_in_cluster <= UINT_MAX);
    *bytes = bytes_available - offset_in_cluster;

    *subcluster_type = type;

    return 0;

fail:
    qcow2_cache_put(s->l2_table_cache, (void **)&l2_slice);
    return ret;
}
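
/*
 * Typical lookup loop (sketch, not part of the original file): callers such
 * as the qcow2 read path translate one contiguous chunk at a time, e.g.:
 *
 *     unsigned int cur_bytes = bytes_remaining;
 *     uint64_t host_offset;
 *     QCow2SubclusterType type;
 *     int ret = qcow2_get_host_offset(bs, offset, &cur_bytes,
 *                                     &host_offset, &type);
 *
 * On success, cur_bytes may have shrunk to the largest prefix of the
 * request that shares one subcluster type (and host contiguity).
 */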

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the appropriate slice of its l2 table.
 *
 * the cluster index in the l2 slice is given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_slice,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_slice = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */
    l1_index = offset_to_l1_index(s, offset);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    if (!(s->l1_table[l1_index] & QCOW_OFLAG_COPIED)) {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * l2_entry_size(s),
                                QCOW2_DISCARD_OTHER);
        }

        /* Get the offset of the newly-allocated l2 table */
        l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
        assert(offset_into_cluster(s, l2_offset) == 0);
    }

    /* load the l2 slice in memory */
    ret = l2_load(bs, offset, l2_offset, &l2_slice);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */
    l2_index = offset_to_l2_slice_index(s, offset);

    *new_l2_slice = l2_slice;
    *new_l2_index = l2_index;

    return 0;
}
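
/*
 * Reminder on QCOW_OFLAG_COPIED (from the qcow2 format): the flag is set on
 * L1/L2 entries whose refcount is exactly 1, meaning the table or cluster
 * can be written in place. get_cluster_table() above relies on this: a
 * missing flag forces a copy-on-write of the L2 table via l2_allocate().
 */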

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset on the virtual disk, allocate a new compressed cluster
 * and put the host offset of the cluster into *host_offset. If a cluster is
 * already allocated at the offset, return an error.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                          uint64_t offset,
                                          int compressed_size,
                                          uint64_t *host_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_slice;
    int64_t cluster_offset;
    int nb_csectors;

    if (has_data_file(bs)) {
        return 0;
    }

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = get_l2_entry(s, l2_slice, l2_index);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return -EIO;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        return cluster_offset;
    }

    nb_csectors =
        (cluster_offset + compressed_size - 1) / QCOW2_COMPRESSED_SECTOR_SIZE -
        (cluster_offset / QCOW2_COMPRESSED_SECTOR_SIZE);

    /* The offset and size must fit in their fields of the L2 table entry */
    assert((cluster_offset & s->cluster_offset_mask) == cluster_offset);
    assert((nb_csectors & s->csize_mask) == nb_csectors);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
    set_l2_entry(s, l2_slice, l2_index, cluster_offset);
    if (has_subclusters(s)) {
        set_l2_bitmap(s, l2_slice, l2_index, 0);
    }
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    *host_offset = cluster_offset & s->cluster_offset_mask;
    return 0;
}
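
/*
 * nb_csectors example (hypothetical numbers): with cluster_offset = 70144
 * (i.e. 512-byte sector 137) and compressed_size = 2000, the last data byte
 * is at offset 72143 (sector 140), so nb_csectors = 140 - 137 = 3. Per the
 * qcow2 layout this is the number of sectors used in addition to the one
 * containing the start offset.
 */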

static int perform_cow(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2COWRegion *start = &m->cow_start;
    Qcow2COWRegion *end = &m->cow_end;
    unsigned buffer_size;
    unsigned data_bytes = end->offset - (start->offset + start->nb_bytes);
    bool merge_reads;
    uint8_t *start_buffer, *end_buffer;
    QEMUIOVector qiov;
    int ret;

    assert(start->nb_bytes <= UINT_MAX - end->nb_bytes);
    assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes);
    assert(start->offset + start->nb_bytes <= end->offset);

    if ((start->nb_bytes == 0 && end->nb_bytes == 0) || m->skip_cow) {
        return 0;
    }

    /* If we have to read both the start and end COW regions and the
     * middle region is not too large then perform just one read
     * operation */
    merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384;
    if (merge_reads) {
        buffer_size = start->nb_bytes + data_bytes + end->nb_bytes;
    } else {
        /* If we have to do two reads, add some padding in the middle
         * if necessary to make sure that the end region is optimally
         * aligned. */
        size_t align = bdrv_opt_mem_align(bs);
        assert(align > 0 && align <= UINT_MAX);
        assert(QEMU_ALIGN_UP(start->nb_bytes, align) <=
               UINT_MAX - end->nb_bytes);
        buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes;
    }

    /* Reserve a buffer large enough to store all the data that we're
     * going to read */
    start_buffer = qemu_try_blockalign(bs, buffer_size);
    if (start_buffer == NULL) {
        return -ENOMEM;
    }
    /* The part of the buffer where the end region is located */
    end_buffer = start_buffer + buffer_size - end->nb_bytes;

    qemu_iovec_init(&qiov, 2 + (m->data_qiov ?
                                qemu_iovec_subvec_niov(m->data_qiov,
                                                       m->data_qiov_offset,
                                                       data_bytes)
                                : 0));

    qemu_co_mutex_unlock(&s->lock);
    /* First we read the existing data from both COW regions. We
     * either read the whole region in one go, or the start and end
     * regions separately. */
    if (merge_reads) {
        qemu_iovec_add(&qiov, start_buffer, buffer_size);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
    } else {
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov);
    }
    if (ret < 0) {
        goto fail;
    }

    /* Encrypt the data if necessary before writing it */
    if (bs->encrypted) {
        ret = qcow2_co_encrypt(bs,
                               m->alloc_offset + start->offset,
                               m->offset + start->offset,
                               start_buffer, start->nb_bytes);
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_co_encrypt(bs,
                               m->alloc_offset + end->offset,
                               m->offset + end->offset,
                               end_buffer, end->nb_bytes);
        if (ret < 0) {
            goto fail;
        }
    }

    /* And now we can write everything. If we have the guest data we
     * can write everything in one single operation */
    if (m->data_qiov) {
        qemu_iovec_reset(&qiov);
        if (start->nb_bytes) {
            qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        }
        qemu_iovec_concat(&qiov, m->data_qiov, m->data_qiov_offset, data_bytes);
        if (end->nb_bytes) {
            qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        }
        /* NOTE: we have a write_aio blkdebug event here followed by
         * a cow_write one in do_perform_cow_write(), but there's only
         * one single I/O operation */
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
    } else {
        /* If there's no guest data then write both COW regions separately */
        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, start_buffer, start->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov);
        if (ret < 0) {
            goto fail;
        }

        qemu_iovec_reset(&qiov);
        qemu_iovec_add(&qiov, end_buffer, end->nb_bytes);
        ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov);
    }

fail:
    qemu_co_mutex_lock(&s->lock);

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (ret == 0) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qemu_vfree(start_buffer);
    qemu_iovec_destroy(&qiov);
    return ret;
}
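
/*
 * Layout of the COW regions handled above and linked below (sketch; offsets
 * are relative to m->offset on the guest side and m->alloc_offset on the
 * host side):
 *
 *   |<-- cow_start -->|<------- guest data ------->|<--- cow_end --->|
 *   m->offset                                        end of allocation
 *
 * cow_start covers the bytes between the start of the first cluster and the
 * start of the write; cow_end covers the bytes between the end of the write
 * and the end of the last allocated cluster.
 */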

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_slice;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_slice, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);

    assert(l2_index + m->nb_clusters <= s->l2_slice_size);
    assert(m->cow_end.offset + m->cow_end.nb_bytes <=
           m->nb_clusters << s->cluster_bits);
    for (i = 0; i < m->nb_clusters; i++) {
        uint64_t offset = cluster_offset + ((uint64_t)i << s->cluster_bits);
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes data
         * concurrently. The first one to complete updates the l2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by perform_cow()), update the l2 table with its cluster
         * pointer and free the old cluster. This is what this loop does. */
        if (get_l2_entry(s, l2_slice, l2_index + i) != 0) {
            old_cluster[j++] = get_l2_entry(s, l2_slice, l2_index + i);
        }

        /* The offset must fit in the offset field of the L2 table entry */
        assert((offset & L2E_OFFSET_MASK) == offset);

        set_l2_entry(s, l2_slice, l2_index + i, offset | QCOW_OFLAG_COPIED);

        /* Update bitmap with the subclusters that were just written */
        if (has_subclusters(s) && !m->prealloc) {
            uint64_t l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index + i);
            unsigned written_from = m->cow_start.offset;
            unsigned written_to = m->cow_end.offset + m->cow_end.nb_bytes;
            int first_sc, last_sc;
            /* Narrow written_from and written_to down to the current cluster */
            written_from = MAX(written_from, i << s->cluster_bits);
            written_to = MIN(written_to, (i + 1) << s->cluster_bits);
            assert(written_from < written_to);
            first_sc = offset_to_sc_index(s, written_from);
            last_sc = offset_to_sc_index(s, written_to - 1);
            l2_bitmap |= QCOW_OFLAG_SUB_ALLOC_RANGE(first_sc, last_sc + 1);
            l2_bitmap &= ~QCOW_OFLAG_SUB_ZERO_RANGE(first_sc, last_sc + 1);
            set_l2_bitmap(s, l2_slice, l2_index + i, l2_bitmap);
        }
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (!m->keep_old_clusters && j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_cluster(bs, old_cluster[i], QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/**
 * Frees the allocated clusters because the request failed and they won't
 * actually be linked.
 */
void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    if (!has_data_file(bs) && !m->keep_old_clusters) {
        qcow2_free_clusters(bs, m->alloc_offset,
                            m->nb_clusters << s->cluster_bits,
                            QCOW2_DISCARD_NEVER);
    }
}
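
/*
 * Subcluster bitmap update example (hypothetical write): with 64 KiB
 * clusters (32 subclusters of 2 KiB), a write touching bytes 3000..8999 of
 * a cluster covers subclusters 1 through 4 (3000 / 2048 = 1 and
 * 8999 / 2048 = 4), so qcow2_alloc_cluster_link_l2() sets the "allocated"
 * bits 1-4 and clears the corresponding "reads as zero" bits.
 */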

/*
 * For a given write request, create a new QCowL2Meta structure, add
 * it to @m and the BDRVQcow2State.cluster_allocs list. If the write
 * request does not need copy-on-write or changes to the L2 metadata
 * then this function does nothing.
 *
 * @host_cluster_offset points to the beginning of the first cluster.
 *
 * @guest_offset and @bytes indicate the offset and length of the
 * request.
 *
 * @l2_slice contains the L2 entries of all clusters involved in this
 * write request.
 *
 * If @keep_old is true it means that the clusters were already
 * allocated and will be overwritten. If false then the clusters are
 * new and we have to decrease the reference count of the old ones.
 *
 * Returns 0 on success, -errno on failure.
 */
static int calculate_l2_meta(BlockDriverState *bs, uint64_t host_cluster_offset,
                             uint64_t guest_offset, unsigned bytes,
                             uint64_t *l2_slice, QCowL2Meta **m, bool keep_old)
{
    BDRVQcow2State *s = bs->opaque;
    int sc_index, l2_index = offset_to_l2_slice_index(s, guest_offset);
    uint64_t l2_entry, l2_bitmap;
    unsigned cow_start_from, cow_end_to;
    unsigned cow_start_to = offset_into_cluster(s, guest_offset);
    unsigned cow_end_from = cow_start_to + bytes;
    unsigned nb_clusters = size_to_clusters(s, cow_end_from);
    QCowL2Meta *old_m = *m;
    QCow2SubclusterType type;
    int i;
    bool skip_cow = keep_old;

    assert(nb_clusters <= s->l2_slice_size - l2_index);

    /* Check the type of all affected subclusters */
    for (i = 0; i < nb_clusters; i++) {
        l2_entry = get_l2_entry(s, l2_slice, l2_index + i);
        l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index + i);
        if (skip_cow) {
            unsigned write_from = MAX(cow_start_to, i << s->cluster_bits);
            unsigned write_to = MIN(cow_end_from, (i + 1) << s->cluster_bits);
            int first_sc = offset_to_sc_index(s, write_from);
            int last_sc = offset_to_sc_index(s, write_to - 1);
            int cnt = qcow2_get_subcluster_range_type(bs, l2_entry, l2_bitmap,
                                                      first_sc, &type);
            /* Is any of the subclusters of type != QCOW2_SUBCLUSTER_NORMAL? */
            if (type != QCOW2_SUBCLUSTER_NORMAL || first_sc + cnt <= last_sc) {
                skip_cow = false;
            }
        } else {
            /* If we can't skip the cow we can still look for invalid entries */
            type = qcow2_get_subcluster_type(bs, l2_entry, l2_bitmap, 0);
        }
        if (type == QCOW2_SUBCLUSTER_INVALID) {
            int l1_index = offset_to_l1_index(s, guest_offset);
            uint64_t l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
            qcow2_signal_corruption(bs, true, -1, -1, "Invalid cluster "
                                    "entry found (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)",
                                    l2_offset, l2_index + i);
            return -EIO;
        }
    }

    if (skip_cow) {
        return 0;
    }

    /* Get the L2 entry of the first cluster */
    l2_entry = get_l2_entry(s, l2_slice, l2_index);
    l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index);
    sc_index = offset_to_sc_index(s, guest_offset);
    type = qcow2_get_subcluster_type(bs, l2_entry, l2_bitmap, sc_index);

    if (!keep_old) {
        switch (type) {
        case QCOW2_SUBCLUSTER_COMPRESSED:
            cow_start_from = 0;
            break;
        case QCOW2_SUBCLUSTER_NORMAL:
        case QCOW2_SUBCLUSTER_ZERO_ALLOC:
        case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
            if (has_subclusters(s)) {
                /* Skip all leading zero and unallocated subclusters */
                uint32_t alloc_bitmap = l2_bitmap & QCOW_L2_BITMAP_ALL_ALLOC;
                cow_start_from =
                    MIN(sc_index, ctz32(alloc_bitmap)) << s->subcluster_bits;
            } else {
                cow_start_from = 0;
            }
            break;
        case QCOW2_SUBCLUSTER_ZERO_PLAIN:
        case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
            cow_start_from = sc_index << s->subcluster_bits;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        switch (type) {
        case QCOW2_SUBCLUSTER_NORMAL:
            cow_start_from = cow_start_to;
            break;
        case QCOW2_SUBCLUSTER_ZERO_ALLOC:
        case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
            cow_start_from = sc_index << s->subcluster_bits;
            break;
        default:
            g_assert_not_reached();
        }
    }

    /* Get the L2 entry of the last cluster */
    l2_index += nb_clusters - 1;
    l2_entry = get_l2_entry(s, l2_slice, l2_index);
    l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index);
    sc_index = offset_to_sc_index(s, guest_offset + bytes - 1);
    type = qcow2_get_subcluster_type(bs, l2_entry, l2_bitmap, sc_index);

    if (!keep_old) {
        switch (type) {
        case QCOW2_SUBCLUSTER_COMPRESSED:
            cow_end_to = ROUND_UP(cow_end_from, s->cluster_size);
            break;
        case QCOW2_SUBCLUSTER_NORMAL:
        case QCOW2_SUBCLUSTER_ZERO_ALLOC:
        case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
            cow_end_to = ROUND_UP(cow_end_from, s->cluster_size);
            if (has_subclusters(s)) {
                /* Skip all trailing zero and unallocated subclusters */
                uint32_t alloc_bitmap = l2_bitmap & QCOW_L2_BITMAP_ALL_ALLOC;
                cow_end_to -=
                    MIN(s->subclusters_per_cluster - sc_index - 1,
                        clz32(alloc_bitmap)) << s->subcluster_bits;
            }
            break;
        case QCOW2_SUBCLUSTER_ZERO_PLAIN:
        case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
            cow_end_to = ROUND_UP(cow_end_from, s->subcluster_size);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        switch (type) {
        case QCOW2_SUBCLUSTER_NORMAL:
            cow_end_to = cow_end_from;
            break;
        case QCOW2_SUBCLUSTER_ZERO_ALLOC:
        case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
            cow_end_to = ROUND_UP(cow_end_from, s->subcluster_size);
            break;
        default:
            g_assert_not_reached();
        }
    }
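    /*
     * At this point [cow_start_from, cow_start_to) and
     * [cow_end_from, cow_end_to) delimit the head and tail COW regions,
     * both relative to the start of the first cluster; they become
     * m->cow_start and m->cow_end below.
     */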

    *m = g_malloc0(sizeof(**m));
    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = host_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,

        .keep_old_clusters = keep_old,

        .cow_start = {
            .offset     = cow_start_from,
            .nb_bytes   = cow_start_to - cow_start_from,
        },
        .cow_end = {
            .offset     = cow_end_from,
            .nb_bytes   = cow_end_to - cow_end_from,
        },
    };

    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    return 0;
}

/*
 * Returns true if writing to the cluster pointed to by @l2_entry
 * requires a new allocation (that is, if the cluster is unallocated
 * or has refcount > 1 and therefore cannot be written in-place).
 */
static bool cluster_needs_new_alloc(BlockDriverState *bs, uint64_t l2_entry)
{
    switch (qcow2_get_cluster_type(bs, l2_entry)) {
    case QCOW2_CLUSTER_NORMAL:
    case QCOW2_CLUSTER_ZERO_ALLOC:
        if (l2_entry & QCOW_OFLAG_COPIED) {
            return false;
        }
        /* fallthrough */
    case QCOW2_CLUSTER_UNALLOCATED:
    case QCOW2_CLUSTER_COMPRESSED:
    case QCOW2_CLUSTER_ZERO_PLAIN:
        return true;
    default:
        abort();
    }
}

/*
 * Returns the number of contiguous clusters that can be written to
 * using one single write request, starting from @l2_index.
 * At most @nb_clusters are checked.
 *
 * If @new_alloc is true this counts clusters that are either
 * unallocated, or allocated but with refcount > 1 (so they need to be
 * newly allocated and COWed).
 *
 * If @new_alloc is false this counts clusters that are already
 * allocated and can be overwritten in-place (this includes clusters
 * of type QCOW2_CLUSTER_ZERO_ALLOC).
 */
static int count_single_write_clusters(BlockDriverState *bs, int nb_clusters,
                                       uint64_t *l2_slice, int l2_index,
                                       bool new_alloc)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t l2_entry = get_l2_entry(s, l2_slice, l2_index);
    uint64_t expected_offset = l2_entry & L2E_OFFSET_MASK;
    int i;

    for (i = 0; i < nb_clusters; i++) {
        l2_entry = get_l2_entry(s, l2_slice, l2_index + i);
        if (cluster_needs_new_alloc(bs, l2_entry) != new_alloc) {
            break;
        }
        if (!new_alloc) {
            if (expected_offset != (l2_entry & L2E_OFFSET_MASK)) {
                break;
            }
            expected_offset += s->cluster_size;
        }
    }

    assert(i <= nb_clusters);
    return i;
}
1398 */ 1399 static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset, 1400 uint64_t *cur_bytes, QCowL2Meta **m) 1401 { 1402 BDRVQcow2State *s = bs->opaque; 1403 QCowL2Meta *old_alloc; 1404 uint64_t bytes = *cur_bytes; 1405 1406 QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) { 1407 1408 uint64_t start = guest_offset; 1409 uint64_t end = start + bytes; 1410 uint64_t old_start = start_of_cluster(s, l2meta_cow_start(old_alloc)); 1411 uint64_t old_end = ROUND_UP(l2meta_cow_end(old_alloc), s->cluster_size); 1412 1413 if (end <= old_start || start >= old_end) { 1414 /* No intersection */ 1415 continue; 1416 } 1417 1418 if (old_alloc->keep_old_clusters && 1419 (end <= l2meta_cow_start(old_alloc) || 1420 start >= l2meta_cow_end(old_alloc))) 1421 { 1422 /* 1423 * Clusters intersect but COW areas don't. And cluster itself is 1424 * already allocated. So, there is no actual conflict. 1425 */ 1426 continue; 1427 } 1428 1429 /* Conflict */ 1430 1431 if (start < old_start) { 1432 /* Stop at the start of a running allocation */ 1433 bytes = old_start - start; 1434 } else { 1435 bytes = 0; 1436 } 1437 1438 /* 1439 * Stop if an l2meta already exists. After yielding, it wouldn't 1440 * be valid any more, so we'd have to clean up the old L2Metas 1441 * and deal with requests depending on them before starting to 1442 * gather new ones. Not worth the trouble. 1443 */ 1444 if (bytes == 0 && *m) { 1445 *cur_bytes = 0; 1446 return 0; 1447 } 1448 1449 if (bytes == 0) { 1450 /* 1451 * Wait for the dependency to complete. We need to recheck 1452 * the free/allocated clusters when we continue. 1453 */ 1454 qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock); 1455 return -EAGAIN; 1456 } 1457 } 1458 1459 /* Make sure that existing clusters and new allocations are only used up to 1460 * the next dependency if we shortened the request above */ 1461 *cur_bytes = bytes; 1462 1463 return 0; 1464 } 1465 1466 /* 1467 * Checks how many already allocated clusters that don't require a new 1468 * allocation there are at the given guest_offset (up to *bytes). 1469 * If *host_offset is not INV_OFFSET, only physically contiguous clusters 1470 * beginning at this host offset are counted. 1471 * 1472 * Note that guest_offset may not be cluster aligned. In this case, the 1473 * returned *host_offset points to exact byte referenced by guest_offset and 1474 * therefore isn't cluster aligned as well. 1475 * 1476 * Returns: 1477 * 0: if no allocated clusters are available at the given offset. 1478 * *bytes is normally unchanged. It is set to 0 if the cluster 1479 * is allocated and can be overwritten in-place but doesn't have 1480 * the right physical offset. 1481 * 1482 * 1: if allocated clusters that can be overwritten in place are 1483 * available at the requested offset. *bytes may have decreased 1484 * and describes the length of the area that can be written to. 

/*
 * Checks how many already allocated clusters that don't require a new
 * allocation there are at the given guest_offset (up to *bytes).
 * If *host_offset is not INV_OFFSET, only physically contiguous clusters
 * beginning at this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and can be overwritten in-place but doesn't have
 *          the right physical offset.
 *
 *   1:     if allocated clusters that can be overwritten in place are
 *          available at the requested offset. *bytes may have decreased
 *          and describes the length of the area that can be written to.
 *
 *   -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
                         uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t l2_entry, cluster_offset;
    uint64_t *l2_slice;
    uint64_t nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == INV_OFFSET || offset_into_cluster(s, guest_offset)
           == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 slice
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_slice_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    /* Limit total byte count to BDRV_REQUEST_MAX_BYTES */
    nb_clusters = MIN(nb_clusters, BDRV_REQUEST_MAX_BYTES >> s->cluster_bits);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    l2_entry = get_l2_entry(s, l2_slice, l2_index);
    cluster_offset = l2_entry & L2E_OFFSET_MASK;

    if (!cluster_needs_new_alloc(bs, l2_entry)) {
        if (offset_into_cluster(s, cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "%s cluster offset "
                                    "%#" PRIx64 " unaligned (guest offset: %#"
                                    PRIx64 ")", l2_entry & QCOW_OFLAG_ZERO ?
                                    "Preallocated zero" : "Data",
                                    cluster_offset, guest_offset);
            ret = -EIO;
            goto out;
        }

        /* If a specific host_offset is required, check it */
        if (*host_offset != INV_OFFSET && cluster_offset != *host_offset) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters = count_single_write_clusters(bs, nb_clusters, l2_slice,
                                                    l2_index, false);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                     keep_clusters * s->cluster_size
                     - offset_into_cluster(s, guest_offset));
        assert(*bytes != 0);

        ret = calculate_l2_meta(bs, cluster_offset, guest_offset,
                                *bytes, l2_slice, m, true);
        if (ret < 0) {
            goto out;
        }

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = cluster_offset + offset_into_cluster(s, guest_offset);
    }

    return ret;
}

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is not INV_OFFSET, it specifies the offset in the image file
 * at which the new clusters must start. *nb_clusters can be 0 on return in
 * this case if the cluster at host_offset is already in use. If *host_offset
 * is INV_OFFSET, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
                                   uint64_t *host_offset, uint64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    if (has_data_file(bs)) {
        assert(*host_offset == INV_OFFSET ||
               *host_offset == start_of_cluster(s, guest_offset));
        *host_offset = start_of_cluster(s, guest_offset);
        return 0;
    }

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == INV_OFFSET) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}
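
/*
 * Example for the constrained case (hypothetical numbers, 64 KiB clusters):
 * a caller asks for 4 clusters at *host_offset = 0x50000. If the cluster at
 * 0x70000 is already in use, qcow2_alloc_clusters_at() can only satisfy the
 * first two clusters, so *nb_clusters is updated from 4 to 2.
 */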

/*
 * Allocates new clusters for an area that is either still unallocated or
 * cannot be overwritten in-place. If *host_offset is not INV_OFFSET,
 * clusters are only allocated if the new allocation can match the specified
 * host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *   -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
                        uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t *l2_slice;
    uint64_t nb_clusters;
    int ret;

    uint64_t alloc_cluster_offset;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 slice
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_slice_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    /* Limit total allocation byte count to BDRV_REQUEST_MAX_BYTES */
    nb_clusters = MIN(nb_clusters, BDRV_REQUEST_MAX_BYTES >> s->cluster_bits);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    nb_clusters = count_single_write_clusters(bs, nb_clusters,
                                              l2_slice, l2_index, true);

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    /* Allocate at a given offset in the image file */
    alloc_cluster_offset = *host_offset == INV_OFFSET ? INV_OFFSET :
        start_of_cluster(s, *host_offset);
    ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                  &nb_clusters);
    if (ret < 0) {
        goto out;
    }

    /* Can't extend contiguous allocation */
    if (nb_clusters == 0) {
        *bytes = 0;
        ret = 0;
        goto out;
    }

    assert(alloc_cluster_offset != INV_OFFSET);

    /*
     * Save info needed for meta data update.
     *
     * requested_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_bytes: Number of bytes from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_bytes: The number of bytes from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
    int avail_bytes = nb_clusters << s->cluster_bits;
    int nb_bytes = MIN(requested_bytes, avail_bytes);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    ret = calculate_l2_meta(bs, alloc_cluster_offset, guest_offset, *bytes,
                            l2_slice, m, false);
    if (ret < 0) {
        goto out;
    }

    ret = 1;

out:
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    return ret;
}
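
/*
 * Byte accounting example for handle_alloc() (hypothetical numbers, 64 KiB
 * clusters): guest_offset is 4 KiB into its cluster and *bytes = 200 KiB,
 * but only 3 clusters could be allocated. Then requested_bytes = 204 KiB,
 * avail_bytes = nb_bytes = 192 KiB, and *bytes shrinks to
 * 192 KiB - 4 KiB = 188 KiB; the caller retries for the remainder.
 */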

/*
 * For a given area on the virtual disk defined by @offset and @bytes,
 * find the corresponding area on the qcow2 image, allocating new
 * clusters (or subclusters) if necessary. The result can span a
 * combination of allocated and previously unallocated clusters.
 *
 * Note that offset may not be cluster aligned. In this case, the returned
 * *host_offset points to the exact byte referenced by offset and therefore
 * isn't cluster aligned either.
 *
 * On return, @host_offset is set to the beginning of the requested
 * area. This area is guaranteed to be contiguous on the qcow2 file
 * but it can be smaller than initially requested. In this case @bytes
 * is updated with the actual size.
 *
 * If any clusters or subclusters were allocated then @m contains a
 * list with the information of all the affected regions. Note that
 * this can happen regardless of whether this function succeeds or
 * not. The caller is responsible for updating the L2 metadata of the
 * allocated clusters (on success) or freeing them (on failure), and
 * for clearing the contents of @m afterwards in both cases.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset,
                            unsigned int *bytes, uint64_t *host_offset,
                            QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes);

again:
    start = offset;
    remaining = *bytes;
    cluster_offset = INV_OFFSET;
    *host_offset = INV_OFFSET;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (*host_offset == INV_OFFSET && cluster_offset != INV_OFFSET) {
            *host_offset = cluster_offset;
        }

        assert(remaining >= cur_bytes);

        start += cur_bytes;
        remaining -= cur_bytes;

        if (cluster_offset != INV_OFFSET) {
            cluster_offset += cur_bytes;
        }

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *    a) Overlap not in the first cluster -> shorten this request and
         *       let the caller handle the rest in its next loop iteration.
         *
         *    b) Real overlaps of two requests. Yield and restart the search
         *       for contiguous clusters (the situation could have changed
         *       while we were sleeping)
         *
         *    c) TODO: Request starts in the same cluster as the in-flight
         *       allocation ends. Shorten the COW of the in-flight allocation,
         *       set cluster_offset to write to the same cluster and set up
         *       the right synchronisation between the in-flight request and
         *       the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *bytes -= remaining;
    assert(*bytes > 0);
    assert(*host_offset != INV_OFFSET);
    assert(offset_into_cluster(s, *host_offset) ==
           offset_into_cluster(s, offset));

    return 0;
}
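
/*
 * Sketch of the assumed caller shape (illustrative, not a function from
 * this file): a write path would loop until the whole request is mapped,
 * writing guest data for each contiguous piece and then committing the
 * QCowL2Meta updates queued in *m:
 *
 *     unsigned int cur_bytes = bytes;
 *     uint64_t host_offset;
 *     QCowL2Meta *meta = NULL;
 *
 *     ret = qcow2_alloc_host_offset(bs, offset, &cur_bytes, &host_offset,
 *                                   &meta);
 *     // ...write cur_bytes of data at host_offset, then update the L2
 *     // tables for every entry in the meta list (or free them on failure).
 */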

/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 slice) and returns the number of discarded
 * clusters.
 */
static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset,
                               uint64_t nb_clusters,
                               enum qcow2_discard_type type, bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_slice;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 slice */
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry = get_l2_entry(s, l2_slice, l2_index + i);
        uint64_t old_l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index + i);
        uint64_t new_l2_entry = old_l2_entry;
        uint64_t new_l2_bitmap = old_l2_bitmap;
        QCow2ClusterType cluster_type =
            qcow2_get_cluster_type(bs, old_l2_entry);

        /*
         * If full_discard is true, the cluster should not read back as zeroes,
         * but rather fall through to the backing file.
         *
         * If full_discard is false, make sure that a discarded area reads back
         * as zeroes for v3 images (we cannot do it for v2 without actually
         * writing a zero-filled buffer). We can skip the operation if the
         * cluster is already marked as zero, or if it's unallocated and we
         * don't have a backing file.
         *
         * TODO We might want to use bdrv_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         */
        if (full_discard) {
            new_l2_entry = new_l2_bitmap = 0;
        } else if (bs->backing || qcow2_cluster_is_allocated(cluster_type)) {
            if (has_subclusters(s)) {
                new_l2_entry = 0;
                new_l2_bitmap = QCOW_L2_BITMAP_ALL_ZEROES;
            } else {
                new_l2_entry = s->qcow_version >= 3 ? QCOW_OFLAG_ZERO : 0;
            }
        }

        if (old_l2_entry == new_l2_entry && old_l2_bitmap == new_l2_bitmap) {
            continue;
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        set_l2_entry(s, l2_slice, l2_index + i, new_l2_entry);
        if (has_subclusters(s)) {
            set_l2_bitmap(s, l2_slice, l2_index + i, new_l2_bitmap);
        }
        /* Then decrease the refcount */
        qcow2_free_any_cluster(bs, old_l2_entry, type);
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    return nb_clusters;
}
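
/*
 * Informal summary of the L2 rewrite in discard_in_l2_slice(), derived from
 * the code above:
 *
 *   full_discard                       -> entry = 0, bitmap = 0
 *   backing file or allocated cluster:
 *     extended L2 (subclusters)        -> entry = 0, bitmap = ALL_ZEROES
 *     v3                               -> entry = QCOW_OFLAG_ZERO
 *     v2                               -> entry = 0 (reads fall through to
 *                                         the backing file; zeroes cannot
 *                                         be guaranteed without writing)
 *
 * Anything else already reads back as zeroes and is left unchanged.
 */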

int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, enum qcow2_discard_type type,
                          bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    int64_t cleared;
    int ret;

    /* Caller must pass aligned values, except at image end */
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
           end_offset == bs->total_sectors << BDRV_SECTOR_BITS);

    nb_clusters = size_to_clusters(s, bytes);

    s->cache_discards = true;

    /* Each L2 slice is handled by its own loop iteration */
    while (nb_clusters > 0) {
        cleared = discard_in_l2_slice(bs, offset, nb_clusters, type,
                                      full_discard);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
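
/*
 * Alignment contract example (illustrative numbers): with 64 KiB clusters,
 * offset = 0x10000 and bytes = 0x20000 are both cluster aligned and are
 * accepted; a range that ends exactly at the image size is also accepted
 * even when that end is not cluster aligned. Anything else trips the
 * asserts above.
 */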

/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 slice) and returns the number of zeroed
 * clusters.
 */
static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
                            uint64_t nb_clusters, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_slice;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 slice */
    nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry = get_l2_entry(s, l2_slice, l2_index + i);
        uint64_t old_l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index + i);
        QCow2ClusterType type = qcow2_get_cluster_type(bs, old_l2_entry);
        bool unmap = (type == QCOW2_CLUSTER_COMPRESSED) ||
            ((flags & BDRV_REQ_MAY_UNMAP) && qcow2_cluster_is_allocated(type));
        uint64_t new_l2_entry = unmap ? 0 : old_l2_entry;
        uint64_t new_l2_bitmap = old_l2_bitmap;

        if (has_subclusters(s)) {
            new_l2_bitmap = QCOW_L2_BITMAP_ALL_ZEROES;
        } else {
            new_l2_entry |= QCOW_OFLAG_ZERO;
        }

        if (old_l2_entry == new_l2_entry && old_l2_bitmap == new_l2_bitmap) {
            continue;
        }

        /* First update L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
        set_l2_entry(s, l2_slice, l2_index + i, new_l2_entry);
        if (has_subclusters(s)) {
            set_l2_bitmap(s, l2_slice, l2_index + i, new_l2_bitmap);
        }

        /* Then decrease the refcount */
        if (unmap) {
            qcow2_free_any_cluster(bs, old_l2_entry, QCOW2_DISCARD_REQUEST);
        }
    }

    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    return nb_clusters;
}

static int zero_l2_subclusters(BlockDriverState *bs, uint64_t offset,
                               unsigned nb_subclusters)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_slice;
    uint64_t old_l2_bitmap, l2_bitmap;
    int l2_index, ret, sc = offset_to_sc_index(s, offset);

    /* For full clusters use zero_in_l2_slice() instead */
    assert(nb_subclusters > 0 && nb_subclusters < s->subclusters_per_cluster);
    assert(sc + nb_subclusters <= s->subclusters_per_cluster);
    assert(offset_into_subcluster(s, offset) == 0);

    ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
    if (ret < 0) {
        return ret;
    }

    switch (qcow2_get_cluster_type(bs, get_l2_entry(s, l2_slice, l2_index))) {
    case QCOW2_CLUSTER_COMPRESSED:
        ret = -ENOTSUP; /* We cannot partially zeroize compressed clusters */
        goto out;
    case QCOW2_CLUSTER_NORMAL:
    case QCOW2_CLUSTER_UNALLOCATED:
        break;
    default:
        g_assert_not_reached();
    }

    old_l2_bitmap = l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index);

    l2_bitmap |= QCOW_OFLAG_SUB_ZERO_RANGE(sc, sc + nb_subclusters);
    l2_bitmap &= ~QCOW_OFLAG_SUB_ALLOC_RANGE(sc, sc + nb_subclusters);

    if (old_l2_bitmap != l2_bitmap) {
        set_l2_bitmap(s, l2_slice, l2_index, l2_bitmap);
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
    }

    ret = 0;
out:
    qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);

    return ret;
}
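
/*
 * Illustration of the bitmap update in zero_l2_subclusters() (informal,
 * assuming the common 32-subclusters-per-cluster layout): zeroing
 * subclusters 4..11 sets those 8 bits in the "zero" half of the 64-bit
 * extended L2 bitmap and clears the same 8 bits in the "alloc" half, so
 * reads of that range return zeroes without touching the data cluster.
 */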

int qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset,
                             uint64_t bytes, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset = offset + bytes;
    uint64_t nb_clusters;
    unsigned head, tail;
    int64_t cleared;
    int ret;

    /* If we have to stay in sync with an external data file, zero out
     * s->data_file first. */
    if (data_file_is_raw(bs)) {
        assert(has_data_file(bs));
        ret = bdrv_co_pwrite_zeroes(s->data_file, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
    }

    /* Caller must pass aligned values, except at image end */
    assert(offset_into_subcluster(s, offset) == 0);
    assert(offset_into_subcluster(s, end_offset) == 0 ||
           end_offset >= bs->total_sectors << BDRV_SECTOR_BITS);

    /*
     * The zero flag is only supported by version 3 and newer. However, if we
     * have no backing file, we can resort to discard in version 2.
     */
    if (s->qcow_version < 3) {
        if (!bs->backing) {
            return qcow2_cluster_discard(bs, offset, bytes,
                                         QCOW2_DISCARD_REQUEST, false);
        }
        return -ENOTSUP;
    }

    head = MIN(end_offset, ROUND_UP(offset, s->cluster_size)) - offset;
    offset += head;

    tail = (end_offset >= bs->total_sectors << BDRV_SECTOR_BITS) ? 0 :
        end_offset - MAX(offset, start_of_cluster(s, end_offset));
    end_offset -= tail;

    s->cache_discards = true;

    if (head) {
        ret = zero_l2_subclusters(bs, offset - head,
                                  size_to_subclusters(s, head));
        if (ret < 0) {
            goto fail;
        }
    }

    /* Each L2 slice is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, end_offset - offset);

    while (nb_clusters > 0) {
        cleared = zero_in_l2_slice(bs, offset, nb_clusters, flags);
        if (cleared < 0) {
            ret = cleared;
            goto fail;
        }

        nb_clusters -= cleared;
        offset += (cleared * s->cluster_size);
    }

    if (tail) {
        ret = zero_l2_subclusters(bs, end_offset, size_to_subclusters(s, tail));
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
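
/*
 * Worked head/tail example for qcow2_subcluster_zeroize() (illustrative
 * numbers): with 64 KiB clusters, offset = 0x1F000 and bytes = 0x23000
 * (end_offset = 0x42000) yield head = 0x1000 (up to the 0x20000 cluster
 * boundary) and tail = 0x2000 (past the 0x40000 boundary). The head and
 * tail go through zero_l2_subclusters() while the two full clusters in
 * between go through zero_in_l2_slice().
 */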

/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * l1_entries and *visited_l1_entries are used to keep track of progress for
 * status_cb(). l1_entries contains the total number of L1 entries and
 * *visited_l1_entries counts all visited L1 entries.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, int64_t *visited_l1_entries,
                                      int64_t l1_entries,
                                      BlockDriverAmendStatusCB *status_cb,
                                      void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_slice = NULL;
    unsigned slice, slice_size2, n_slices;
    int ret;
    int i, j;

    /* qcow2_downgrade() is not allowed in images with subclusters */
    assert(!has_subclusters(s));

    slice_size2 = s->l2_slice_size * l2_entry_size(s);
    n_slices = s->cluster_size / slice_size2;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to load them into when reading
         * them from disk */
        l2_slice = qemu_try_blockalign(bs->file->bs, slice_size2);
        if (l2_slice == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        uint64_t l2_refcount;

        if (!l2_offset) {
            /* unallocated */
            (*visited_l1_entries)++;
            if (status_cb) {
                status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
            }
            continue;
        }

        if (offset_into_cluster(s, l2_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                    PRIx64 " unaligned (L1 index: %#x)",
                                    l2_offset, i);
            ret = -EIO;
            goto fail;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &l2_refcount);
        if (ret < 0) {
            goto fail;
        }

        for (slice = 0; slice < n_slices; slice++) {
            uint64_t slice_offset = l2_offset + slice * slice_size2;
            bool l2_dirty = false;
            if (is_active_l1) {
                /* get active L2 tables from cache */
                ret = qcow2_cache_get(bs, s->l2_table_cache, slice_offset,
                                      (void **)&l2_slice);
            } else {
                /* load inactive L2 tables from disk */
                ret = bdrv_pread(bs->file, slice_offset, l2_slice, slice_size2);
            }
            if (ret < 0) {
                goto fail;
            }

            for (j = 0; j < s->l2_slice_size; j++) {
                uint64_t l2_entry = get_l2_entry(s, l2_slice, j);
                int64_t offset = l2_entry & L2E_OFFSET_MASK;
                QCow2ClusterType cluster_type =
                    qcow2_get_cluster_type(bs, l2_entry);

                if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN &&
                    cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) {
                    continue;
                }

                if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                    if (!bs->backing) {
                        /*
                         * not backed; therefore we can simply deallocate the
                         * cluster. No need to call set_l2_bitmap(), this
                         * function doesn't support images with subclusters.
                         */
                        set_l2_entry(s, l2_slice, j, 0);
                        l2_dirty = true;
                        continue;
                    }

                    offset = qcow2_alloc_clusters(bs, s->cluster_size);
                    if (offset < 0) {
                        ret = offset;
                        goto fail;
                    }

                    /* The offset must fit in the offset field */
                    assert((offset & L2E_OFFSET_MASK) == offset);

                    if (l2_refcount > 1) {
                        /* For shared L2 tables, set the refcount accordingly
                         * (it is already 1 and needs to be l2_refcount) */
                        ret = qcow2_update_cluster_refcount(
                                bs, offset >> s->cluster_bits,
                                refcount_diff(1, l2_refcount), false,
                                QCOW2_DISCARD_OTHER);
                        if (ret < 0) {
                            qcow2_free_clusters(bs, offset, s->cluster_size,
                                                QCOW2_DISCARD_OTHER);
                            goto fail;
                        }
                    }
                }

                if (offset_into_cluster(s, offset)) {
                    int l2_index = slice * s->l2_slice_size + j;
                    qcow2_signal_corruption(
                        bs, true, -1, -1,
                        "Cluster allocation offset "
                        "%#" PRIx64 " unaligned (L2 offset: %#"
                        PRIx64 ", L2 index: %#x)", offset,
                        l2_offset, l2_index);
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    ret = -EIO;
                    goto fail;
                }

                ret = qcow2_pre_write_overlap_check(bs, 0, offset,
                                                    s->cluster_size, true);
                if (ret < 0) {
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    goto fail;
                }

                ret = bdrv_pwrite_zeroes(s->data_file, offset,
                                         s->cluster_size, 0);
                if (ret < 0) {
                    if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_ALWAYS);
                    }
                    goto fail;
                }

                if (l2_refcount == 1) {
                    set_l2_entry(s, l2_slice, j, offset | QCOW_OFLAG_COPIED);
                } else {
                    set_l2_entry(s, l2_slice, j, offset);
                }
                /*
                 * No need to call set_l2_bitmap() after set_l2_entry() because
                 * this function doesn't support images with subclusters.
                 */
                l2_dirty = true;
            }

            if (is_active_l1) {
                if (l2_dirty) {
                    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
                    qcow2_cache_depends_on_flush(s->l2_table_cache);
                }
                qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
            } else {
                if (l2_dirty) {
                    ret = qcow2_pre_write_overlap_check(
                        bs, QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2,
                        slice_offset, slice_size2, false);
                    if (ret < 0) {
                        goto fail;
                    }

                    ret = bdrv_pwrite(bs->file, slice_offset,
                                      l2_slice, slice_size2);
                    if (ret < 0) {
                        goto fail;
                    }
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
        }
    }

    ret = 0;

fail:
    if (l2_slice) {
        if (!is_active_l1) {
            qemu_vfree(l2_slice);
        } else {
            qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
        }
    }
    return ret;
}
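
/*
 * Informal recap of the per-entry transformation above: a ZERO_PLAIN entry
 * in a non-backed image is simply deallocated; otherwise (ZERO_PLAIN with a
 * backing file, or ZERO_ALLOC) the cluster is zero-written on disk and the
 * entry is rewritten as a normal cluster, carrying QCOW_OFLAG_COPIED only
 * when the owning L2 table is not shared (l2_refcount == 1).
 */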

/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs,
                               BlockDriverAmendStatusCB *status_cb,
                               void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table = NULL;
    int64_t l1_entries = 0, visited_l1_entries = 0;
    int ret;
    int i, j;

    if (status_cb) {
        l1_entries = s->l1_size;
        for (i = 0; i < s->nb_snapshots; i++) {
            l1_entries += s->snapshots[i].l1_size;
        }
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &visited_l1_entries, l1_entries,
                                     status_cb, cb_opaque);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_size2;
        uint64_t *new_l1_table;
        Error *local_err = NULL;

        ret = qcow2_validate_table(bs, s->snapshots[i].l1_table_offset,
                                   s->snapshots[i].l1_size, L1E_SIZE,
                                   QCOW_MAX_L1_SIZE, "Snapshot L1 table",
                                   &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            goto fail;
        }

        l1_size2 = s->snapshots[i].l1_size * L1E_SIZE;
        new_l1_table = g_try_realloc(l1_table, l1_size2);

        if (!new_l1_table) {
            ret = -ENOMEM;
            goto fail;
        }

        l1_table = new_l1_table;

        ret = bdrv_pread(bs->file, s->snapshots[i].l1_table_offset,
                         l1_table, l1_size2);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &visited_l1_entries, l1_entries,
                                         status_cb, cb_opaque);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(l1_table);
    return ret;
}
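
/*
 * Assumed usage context (not stated in this file): this expansion pass is
 * what makes a downgrade such as "qemu-img amend -o compat=0.10" safe,
 * because the v2 format has no zero flag with which the zeroed state could
 * otherwise be preserved.
 */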

void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry,
                                     uint64_t *coffset, int *csize)
{
    BDRVQcow2State *s = bs->opaque;
    int nb_csectors;

    assert(qcow2_get_cluster_type(bs, l2_entry) == QCOW2_CLUSTER_COMPRESSED);

    *coffset = l2_entry & s->cluster_offset_mask;

    nb_csectors = ((l2_entry >> s->csize_shift) & s->csize_mask) + 1;
    *csize = nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE -
             (*coffset & (QCOW2_COMPRESSED_SECTOR_SIZE - 1));
}
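
/*
 * Worked example (illustrative numbers, assuming 64 KiB clusters, where the
 * compressed size field is 8 bits wide and s->csize_shift is 54): an
 * l2_entry with *coffset = 0x50200 and a size field of 3 gives
 * nb_csectors = 4, so *csize = 4 * 512 - 0x200 = 1536 bytes of compressed
 * data starting at host offset 0x50200.
 */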