1 /* 2 * Block driver for the QCOW version 2 format 3 * 4 * Copyright (c) 2004-2006 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25 #include "qemu/osdep.h" 26 #include <zlib.h> 27 28 #include "qapi/error.h" 29 #include "qcow2.h" 30 #include "qemu/bswap.h" 31 #include "trace.h" 32 33 int qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t exact_size) 34 { 35 BDRVQcow2State *s = bs->opaque; 36 int new_l1_size, i, ret; 37 38 if (exact_size >= s->l1_size) { 39 return 0; 40 } 41 42 new_l1_size = exact_size; 43 44 #ifdef DEBUG_ALLOC2 45 fprintf(stderr, "shrink l1_table from %d to %d\n", s->l1_size, new_l1_size); 46 #endif 47 48 BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_WRITE_TABLE); 49 ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset + 50 new_l1_size * L1E_SIZE, 51 (s->l1_size - new_l1_size) * L1E_SIZE, 0); 52 if (ret < 0) { 53 goto fail; 54 } 55 56 ret = bdrv_flush(bs->file->bs); 57 if (ret < 0) { 58 goto fail; 59 } 60 61 BLKDBG_EVENT(bs->file, BLKDBG_L1_SHRINK_FREE_L2_CLUSTERS); 62 for (i = s->l1_size - 1; i > new_l1_size - 1; i--) { 63 if ((s->l1_table[i] & L1E_OFFSET_MASK) == 0) { 64 continue; 65 } 66 qcow2_free_clusters(bs, s->l1_table[i] & L1E_OFFSET_MASK, 67 s->cluster_size, QCOW2_DISCARD_ALWAYS); 68 s->l1_table[i] = 0; 69 } 70 return 0; 71 72 fail: 73 /* 74 * If the write in the l1_table failed the image may contain a partially 75 * overwritten l1_table. In this case it would be better to clear the 76 * l1_table in memory to avoid possible image corruption. 77 */ 78 memset(s->l1_table + new_l1_size, 0, 79 (s->l1_size - new_l1_size) * L1E_SIZE); 80 return ret; 81 } 82 83 int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size, 84 bool exact_size) 85 { 86 BDRVQcow2State *s = bs->opaque; 87 int new_l1_size2, ret, i; 88 uint64_t *new_l1_table; 89 int64_t old_l1_table_offset, old_l1_size; 90 int64_t new_l1_table_offset, new_l1_size; 91 uint8_t data[12]; 92 93 if (min_size <= s->l1_size) 94 return 0; 95 96 /* Do a sanity check on min_size before trying to calculate new_l1_size 97 * (this prevents overflows during the while loop for the calculation of 98 * new_l1_size) */ 99 if (min_size > INT_MAX / L1E_SIZE) { 100 return -EFBIG; 101 } 102 103 if (exact_size) { 104 new_l1_size = min_size; 105 } else { 106 /* Bump size up to reduce the number of times we have to grow */ 107 new_l1_size = s->l1_size; 108 if (new_l1_size == 0) { 109 new_l1_size = 1; 110 } 111 while (min_size > new_l1_size) { 112 new_l1_size = DIV_ROUND_UP(new_l1_size * 3, 2); 113 } 114 } 115 116 QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX); 117 if (new_l1_size > QCOW_MAX_L1_SIZE / L1E_SIZE) { 118 return -EFBIG; 119 } 120 121 #ifdef DEBUG_ALLOC2 122 fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n", 123 s->l1_size, new_l1_size); 124 #endif 125 126 new_l1_size2 = L1E_SIZE * new_l1_size; 127 new_l1_table = qemu_try_blockalign(bs->file->bs, new_l1_size2); 128 if (new_l1_table == NULL) { 129 return -ENOMEM; 130 } 131 memset(new_l1_table, 0, new_l1_size2); 132 133 if (s->l1_size) { 134 memcpy(new_l1_table, s->l1_table, s->l1_size * L1E_SIZE); 135 } 136 137 /* write new table (align to cluster) */ 138 BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE); 139 new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2); 140 if (new_l1_table_offset < 0) { 141 qemu_vfree(new_l1_table); 142 return new_l1_table_offset; 143 } 144 145 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 146 if (ret < 0) { 147 goto fail; 148 } 149 150 /* the L1 position has not yet been updated, so these clusters must 151 * indeed be completely free */ 152 ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset, 153 new_l1_size2, false); 154 if (ret < 0) { 155 goto fail; 156 } 157 158 BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE); 159 for(i = 0; i < s->l1_size; i++) 160 new_l1_table[i] = cpu_to_be64(new_l1_table[i]); 161 ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, 162 new_l1_table, new_l1_size2); 163 if (ret < 0) 164 goto fail; 165 for(i = 0; i < s->l1_size; i++) 166 new_l1_table[i] = be64_to_cpu(new_l1_table[i]); 167 168 /* set new table */ 169 BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE); 170 stl_be_p(data, new_l1_size); 171 stq_be_p(data + 4, new_l1_table_offset); 172 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size), 173 data, sizeof(data)); 174 if (ret < 0) { 175 goto fail; 176 } 177 qemu_vfree(s->l1_table); 178 old_l1_table_offset = s->l1_table_offset; 179 s->l1_table_offset = new_l1_table_offset; 180 s->l1_table = new_l1_table; 181 old_l1_size = s->l1_size; 182 s->l1_size = new_l1_size; 183 qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * L1E_SIZE, 184 QCOW2_DISCARD_OTHER); 185 return 0; 186 fail: 187 qemu_vfree(new_l1_table); 188 qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2, 189 QCOW2_DISCARD_OTHER); 190 return ret; 191 } 192 193 /* 194 * l2_load 195 * 196 * @bs: The BlockDriverState 197 * @offset: A guest offset, used to calculate what slice of the L2 198 * table to load. 199 * @l2_offset: Offset to the L2 table in the image file. 200 * @l2_slice: Location to store the pointer to the L2 slice. 201 * 202 * Loads a L2 slice into memory (L2 slices are the parts of L2 tables 203 * that are loaded by the qcow2 cache). If the slice is in the cache, 204 * the cache is used; otherwise the L2 slice is loaded from the image 205 * file. 206 */ 207 static int l2_load(BlockDriverState *bs, uint64_t offset, 208 uint64_t l2_offset, uint64_t **l2_slice) 209 { 210 BDRVQcow2State *s = bs->opaque; 211 int start_of_slice = l2_entry_size(s) * 212 (offset_to_l2_index(s, offset) - offset_to_l2_slice_index(s, offset)); 213 214 return qcow2_cache_get(bs, s->l2_table_cache, l2_offset + start_of_slice, 215 (void **)l2_slice); 216 } 217 218 /* 219 * Writes an L1 entry to disk (note that depending on the alignment 220 * requirements this function may write more that just one entry in 221 * order to prevent bdrv_pwrite from performing a read-modify-write) 222 */ 223 int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index) 224 { 225 BDRVQcow2State *s = bs->opaque; 226 int l1_start_index; 227 int i, ret; 228 int bufsize = MAX(L1E_SIZE, 229 MIN(bs->file->bs->bl.request_alignment, s->cluster_size)); 230 int nentries = bufsize / L1E_SIZE; 231 g_autofree uint64_t *buf = g_try_new0(uint64_t, nentries); 232 233 if (buf == NULL) { 234 return -ENOMEM; 235 } 236 237 l1_start_index = QEMU_ALIGN_DOWN(l1_index, nentries); 238 for (i = 0; i < MIN(nentries, s->l1_size - l1_start_index); i++) { 239 buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]); 240 } 241 242 ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1, 243 s->l1_table_offset + 8 * l1_start_index, bufsize, false); 244 if (ret < 0) { 245 return ret; 246 } 247 248 BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE); 249 ret = bdrv_pwrite_sync(bs->file, 250 s->l1_table_offset + 8 * l1_start_index, 251 buf, bufsize); 252 if (ret < 0) { 253 return ret; 254 } 255 256 return 0; 257 } 258 259 /* 260 * l2_allocate 261 * 262 * Allocate a new l2 entry in the file. If l1_index points to an already 263 * used entry in the L2 table (i.e. we are doing a copy on write for the L2 264 * table) copy the contents of the old L2 table into the newly allocated one. 265 * Otherwise the new table is initialized with zeros. 266 * 267 */ 268 269 static int l2_allocate(BlockDriverState *bs, int l1_index) 270 { 271 BDRVQcow2State *s = bs->opaque; 272 uint64_t old_l2_offset; 273 uint64_t *l2_slice = NULL; 274 unsigned slice, slice_size2, n_slices; 275 int64_t l2_offset; 276 int ret; 277 278 old_l2_offset = s->l1_table[l1_index]; 279 280 trace_qcow2_l2_allocate(bs, l1_index); 281 282 /* allocate a new l2 entry */ 283 284 l2_offset = qcow2_alloc_clusters(bs, s->l2_size * l2_entry_size(s)); 285 if (l2_offset < 0) { 286 ret = l2_offset; 287 goto fail; 288 } 289 290 /* The offset must fit in the offset field of the L1 table entry */ 291 assert((l2_offset & L1E_OFFSET_MASK) == l2_offset); 292 293 /* If we're allocating the table at offset 0 then something is wrong */ 294 if (l2_offset == 0) { 295 qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid " 296 "allocation of L2 table at offset 0"); 297 ret = -EIO; 298 goto fail; 299 } 300 301 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 302 if (ret < 0) { 303 goto fail; 304 } 305 306 /* allocate a new entry in the l2 cache */ 307 308 slice_size2 = s->l2_slice_size * l2_entry_size(s); 309 n_slices = s->cluster_size / slice_size2; 310 311 trace_qcow2_l2_allocate_get_empty(bs, l1_index); 312 for (slice = 0; slice < n_slices; slice++) { 313 ret = qcow2_cache_get_empty(bs, s->l2_table_cache, 314 l2_offset + slice * slice_size2, 315 (void **) &l2_slice); 316 if (ret < 0) { 317 goto fail; 318 } 319 320 if ((old_l2_offset & L1E_OFFSET_MASK) == 0) { 321 /* if there was no old l2 table, clear the new slice */ 322 memset(l2_slice, 0, slice_size2); 323 } else { 324 uint64_t *old_slice; 325 uint64_t old_l2_slice_offset = 326 (old_l2_offset & L1E_OFFSET_MASK) + slice * slice_size2; 327 328 /* if there was an old l2 table, read a slice from the disk */ 329 BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ); 330 ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_slice_offset, 331 (void **) &old_slice); 332 if (ret < 0) { 333 goto fail; 334 } 335 336 memcpy(l2_slice, old_slice, slice_size2); 337 338 qcow2_cache_put(s->l2_table_cache, (void **) &old_slice); 339 } 340 341 /* write the l2 slice to the file */ 342 BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE); 343 344 trace_qcow2_l2_allocate_write_l2(bs, l1_index); 345 qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice); 346 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 347 } 348 349 ret = qcow2_cache_flush(bs, s->l2_table_cache); 350 if (ret < 0) { 351 goto fail; 352 } 353 354 /* update the L1 entry */ 355 trace_qcow2_l2_allocate_write_l1(bs, l1_index); 356 s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED; 357 ret = qcow2_write_l1_entry(bs, l1_index); 358 if (ret < 0) { 359 goto fail; 360 } 361 362 trace_qcow2_l2_allocate_done(bs, l1_index, 0); 363 return 0; 364 365 fail: 366 trace_qcow2_l2_allocate_done(bs, l1_index, ret); 367 if (l2_slice != NULL) { 368 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 369 } 370 s->l1_table[l1_index] = old_l2_offset; 371 if (l2_offset > 0) { 372 qcow2_free_clusters(bs, l2_offset, s->l2_size * l2_entry_size(s), 373 QCOW2_DISCARD_ALWAYS); 374 } 375 return ret; 376 } 377 378 /* 379 * For a given L2 entry, count the number of contiguous subclusters of 380 * the same type starting from @sc_from. Compressed clusters are 381 * treated as if they were divided into subclusters of size 382 * s->subcluster_size. 383 * 384 * Return the number of contiguous subclusters and set @type to the 385 * subcluster type. 386 * 387 * If the L2 entry is invalid return -errno and set @type to 388 * QCOW2_SUBCLUSTER_INVALID. 389 */ 390 static int qcow2_get_subcluster_range_type(BlockDriverState *bs, 391 uint64_t l2_entry, 392 uint64_t l2_bitmap, 393 unsigned sc_from, 394 QCow2SubclusterType *type) 395 { 396 BDRVQcow2State *s = bs->opaque; 397 uint32_t val; 398 399 *type = qcow2_get_subcluster_type(bs, l2_entry, l2_bitmap, sc_from); 400 401 if (*type == QCOW2_SUBCLUSTER_INVALID) { 402 return -EINVAL; 403 } else if (!has_subclusters(s) || *type == QCOW2_SUBCLUSTER_COMPRESSED) { 404 return s->subclusters_per_cluster - sc_from; 405 } 406 407 switch (*type) { 408 case QCOW2_SUBCLUSTER_NORMAL: 409 val = l2_bitmap | QCOW_OFLAG_SUB_ALLOC_RANGE(0, sc_from); 410 return cto32(val) - sc_from; 411 412 case QCOW2_SUBCLUSTER_ZERO_PLAIN: 413 case QCOW2_SUBCLUSTER_ZERO_ALLOC: 414 val = (l2_bitmap | QCOW_OFLAG_SUB_ZERO_RANGE(0, sc_from)) >> 32; 415 return cto32(val) - sc_from; 416 417 case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN: 418 case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC: 419 val = ((l2_bitmap >> 32) | l2_bitmap) 420 & ~QCOW_OFLAG_SUB_ALLOC_RANGE(0, sc_from); 421 return ctz32(val) - sc_from; 422 423 default: 424 g_assert_not_reached(); 425 } 426 } 427 428 /* 429 * Return the number of contiguous subclusters of the exact same type 430 * in a given L2 slice, starting from cluster @l2_index, subcluster 431 * @sc_index. Allocated subclusters are required to be contiguous in 432 * the image file. 433 * At most @nb_clusters are checked (note that this means clusters, 434 * not subclusters). 435 * Compressed clusters are always processed one by one but for the 436 * purpose of this count they are treated as if they were divided into 437 * subclusters of size s->subcluster_size. 438 * On failure return -errno and update @l2_index to point to the 439 * invalid entry. 440 */ 441 static int count_contiguous_subclusters(BlockDriverState *bs, int nb_clusters, 442 unsigned sc_index, uint64_t *l2_slice, 443 unsigned *l2_index) 444 { 445 BDRVQcow2State *s = bs->opaque; 446 int i, count = 0; 447 bool check_offset = false; 448 uint64_t expected_offset = 0; 449 QCow2SubclusterType expected_type = QCOW2_SUBCLUSTER_NORMAL, type; 450 451 assert(*l2_index + nb_clusters <= s->l2_slice_size); 452 453 for (i = 0; i < nb_clusters; i++) { 454 unsigned first_sc = (i == 0) ? sc_index : 0; 455 uint64_t l2_entry = get_l2_entry(s, l2_slice, *l2_index + i); 456 uint64_t l2_bitmap = get_l2_bitmap(s, l2_slice, *l2_index + i); 457 int ret = qcow2_get_subcluster_range_type(bs, l2_entry, l2_bitmap, 458 first_sc, &type); 459 if (ret < 0) { 460 *l2_index += i; /* Point to the invalid entry */ 461 return -EIO; 462 } 463 if (i == 0) { 464 if (type == QCOW2_SUBCLUSTER_COMPRESSED) { 465 /* Compressed clusters are always processed one by one */ 466 return ret; 467 } 468 expected_type = type; 469 expected_offset = l2_entry & L2E_OFFSET_MASK; 470 check_offset = (type == QCOW2_SUBCLUSTER_NORMAL || 471 type == QCOW2_SUBCLUSTER_ZERO_ALLOC || 472 type == QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC); 473 } else if (type != expected_type) { 474 break; 475 } else if (check_offset) { 476 expected_offset += s->cluster_size; 477 if (expected_offset != (l2_entry & L2E_OFFSET_MASK)) { 478 break; 479 } 480 } 481 count += ret; 482 /* Stop if there are type changes before the end of the cluster */ 483 if (first_sc + ret < s->subclusters_per_cluster) { 484 break; 485 } 486 } 487 488 return count; 489 } 490 491 static int coroutine_fn do_perform_cow_read(BlockDriverState *bs, 492 uint64_t src_cluster_offset, 493 unsigned offset_in_cluster, 494 QEMUIOVector *qiov) 495 { 496 int ret; 497 498 if (qiov->size == 0) { 499 return 0; 500 } 501 502 BLKDBG_EVENT(bs->file, BLKDBG_COW_READ); 503 504 if (!bs->drv) { 505 return -ENOMEDIUM; 506 } 507 508 /* Call .bdrv_co_readv() directly instead of using the public block-layer 509 * interface. This avoids double I/O throttling and request tracking, 510 * which can lead to deadlock when block layer copy-on-read is enabled. 511 */ 512 ret = bs->drv->bdrv_co_preadv_part(bs, 513 src_cluster_offset + offset_in_cluster, 514 qiov->size, qiov, 0, 0); 515 if (ret < 0) { 516 return ret; 517 } 518 519 return 0; 520 } 521 522 static int coroutine_fn do_perform_cow_write(BlockDriverState *bs, 523 uint64_t cluster_offset, 524 unsigned offset_in_cluster, 525 QEMUIOVector *qiov) 526 { 527 BDRVQcow2State *s = bs->opaque; 528 int ret; 529 530 if (qiov->size == 0) { 531 return 0; 532 } 533 534 ret = qcow2_pre_write_overlap_check(bs, 0, 535 cluster_offset + offset_in_cluster, qiov->size, true); 536 if (ret < 0) { 537 return ret; 538 } 539 540 BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE); 541 ret = bdrv_co_pwritev(s->data_file, cluster_offset + offset_in_cluster, 542 qiov->size, qiov, 0); 543 if (ret < 0) { 544 return ret; 545 } 546 547 return 0; 548 } 549 550 551 /* 552 * get_host_offset 553 * 554 * For a given offset of the virtual disk find the equivalent host 555 * offset in the qcow2 file and store it in *host_offset. Neither 556 * offset needs to be aligned to a cluster boundary. 557 * 558 * If the cluster is unallocated then *host_offset will be 0. 559 * If the cluster is compressed then *host_offset will contain the 560 * complete compressed cluster descriptor. 561 * 562 * On entry, *bytes is the maximum number of contiguous bytes starting at 563 * offset that we are interested in. 564 * 565 * On exit, *bytes is the number of bytes starting at offset that have the same 566 * subcluster type and (if applicable) are stored contiguously in the image 567 * file. The subcluster type is stored in *subcluster_type. 568 * Compressed clusters are always processed one by one. 569 * 570 * Returns 0 on success, -errno in error cases. 571 */ 572 int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset, 573 unsigned int *bytes, uint64_t *host_offset, 574 QCow2SubclusterType *subcluster_type) 575 { 576 BDRVQcow2State *s = bs->opaque; 577 unsigned int l2_index, sc_index; 578 uint64_t l1_index, l2_offset, *l2_slice, l2_entry, l2_bitmap; 579 int sc; 580 unsigned int offset_in_cluster; 581 uint64_t bytes_available, bytes_needed, nb_clusters; 582 QCow2SubclusterType type; 583 int ret; 584 585 offset_in_cluster = offset_into_cluster(s, offset); 586 bytes_needed = (uint64_t) *bytes + offset_in_cluster; 587 588 /* compute how many bytes there are between the start of the cluster 589 * containing offset and the end of the l2 slice that contains 590 * the entry pointing to it */ 591 bytes_available = 592 ((uint64_t) (s->l2_slice_size - offset_to_l2_slice_index(s, offset))) 593 << s->cluster_bits; 594 595 if (bytes_needed > bytes_available) { 596 bytes_needed = bytes_available; 597 } 598 599 *host_offset = 0; 600 601 /* seek to the l2 offset in the l1 table */ 602 603 l1_index = offset_to_l1_index(s, offset); 604 if (l1_index >= s->l1_size) { 605 type = QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN; 606 goto out; 607 } 608 609 l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK; 610 if (!l2_offset) { 611 type = QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN; 612 goto out; 613 } 614 615 if (offset_into_cluster(s, l2_offset)) { 616 qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64 617 " unaligned (L1 index: %#" PRIx64 ")", 618 l2_offset, l1_index); 619 return -EIO; 620 } 621 622 /* load the l2 slice in memory */ 623 624 ret = l2_load(bs, offset, l2_offset, &l2_slice); 625 if (ret < 0) { 626 return ret; 627 } 628 629 /* find the cluster offset for the given disk offset */ 630 631 l2_index = offset_to_l2_slice_index(s, offset); 632 sc_index = offset_to_sc_index(s, offset); 633 l2_entry = get_l2_entry(s, l2_slice, l2_index); 634 l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index); 635 636 nb_clusters = size_to_clusters(s, bytes_needed); 637 /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned 638 * integers; the minimum cluster size is 512, so this assertion is always 639 * true */ 640 assert(nb_clusters <= INT_MAX); 641 642 type = qcow2_get_subcluster_type(bs, l2_entry, l2_bitmap, sc_index); 643 if (s->qcow_version < 3 && (type == QCOW2_SUBCLUSTER_ZERO_PLAIN || 644 type == QCOW2_SUBCLUSTER_ZERO_ALLOC)) { 645 qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found" 646 " in pre-v3 image (L2 offset: %#" PRIx64 647 ", L2 index: %#x)", l2_offset, l2_index); 648 ret = -EIO; 649 goto fail; 650 } 651 switch (type) { 652 case QCOW2_SUBCLUSTER_INVALID: 653 break; /* This is handled by count_contiguous_subclusters() below */ 654 case QCOW2_SUBCLUSTER_COMPRESSED: 655 if (has_data_file(bs)) { 656 qcow2_signal_corruption(bs, true, -1, -1, "Compressed cluster " 657 "entry found in image with external data " 658 "file (L2 offset: %#" PRIx64 ", L2 index: " 659 "%#x)", l2_offset, l2_index); 660 ret = -EIO; 661 goto fail; 662 } 663 *host_offset = l2_entry & L2E_COMPRESSED_OFFSET_SIZE_MASK; 664 break; 665 case QCOW2_SUBCLUSTER_ZERO_PLAIN: 666 case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN: 667 break; 668 case QCOW2_SUBCLUSTER_ZERO_ALLOC: 669 case QCOW2_SUBCLUSTER_NORMAL: 670 case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC: { 671 uint64_t host_cluster_offset = l2_entry & L2E_OFFSET_MASK; 672 *host_offset = host_cluster_offset + offset_in_cluster; 673 if (offset_into_cluster(s, host_cluster_offset)) { 674 qcow2_signal_corruption(bs, true, -1, -1, 675 "Cluster allocation offset %#" 676 PRIx64 " unaligned (L2 offset: %#" PRIx64 677 ", L2 index: %#x)", host_cluster_offset, 678 l2_offset, l2_index); 679 ret = -EIO; 680 goto fail; 681 } 682 if (has_data_file(bs) && *host_offset != offset) { 683 qcow2_signal_corruption(bs, true, -1, -1, 684 "External data file host cluster offset %#" 685 PRIx64 " does not match guest cluster " 686 "offset: %#" PRIx64 687 ", L2 index: %#x)", host_cluster_offset, 688 offset - offset_in_cluster, l2_index); 689 ret = -EIO; 690 goto fail; 691 } 692 break; 693 } 694 default: 695 abort(); 696 } 697 698 sc = count_contiguous_subclusters(bs, nb_clusters, sc_index, 699 l2_slice, &l2_index); 700 if (sc < 0) { 701 qcow2_signal_corruption(bs, true, -1, -1, "Invalid cluster entry found " 702 " (L2 offset: %#" PRIx64 ", L2 index: %#x)", 703 l2_offset, l2_index); 704 ret = -EIO; 705 goto fail; 706 } 707 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 708 709 bytes_available = ((int64_t)sc + sc_index) << s->subcluster_bits; 710 711 out: 712 if (bytes_available > bytes_needed) { 713 bytes_available = bytes_needed; 714 } 715 716 /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster; 717 * subtracting offset_in_cluster will therefore definitely yield something 718 * not exceeding UINT_MAX */ 719 assert(bytes_available - offset_in_cluster <= UINT_MAX); 720 *bytes = bytes_available - offset_in_cluster; 721 722 *subcluster_type = type; 723 724 return 0; 725 726 fail: 727 qcow2_cache_put(s->l2_table_cache, (void **)&l2_slice); 728 return ret; 729 } 730 731 /* 732 * get_cluster_table 733 * 734 * for a given disk offset, load (and allocate if needed) 735 * the appropriate slice of its l2 table. 736 * 737 * the cluster index in the l2 slice is given to the caller. 738 * 739 * Returns 0 on success, -errno in failure case 740 */ 741 static int get_cluster_table(BlockDriverState *bs, uint64_t offset, 742 uint64_t **new_l2_slice, 743 int *new_l2_index) 744 { 745 BDRVQcow2State *s = bs->opaque; 746 unsigned int l2_index; 747 uint64_t l1_index, l2_offset; 748 uint64_t *l2_slice = NULL; 749 int ret; 750 751 /* seek to the l2 offset in the l1 table */ 752 753 l1_index = offset_to_l1_index(s, offset); 754 if (l1_index >= s->l1_size) { 755 ret = qcow2_grow_l1_table(bs, l1_index + 1, false); 756 if (ret < 0) { 757 return ret; 758 } 759 } 760 761 assert(l1_index < s->l1_size); 762 l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK; 763 if (offset_into_cluster(s, l2_offset)) { 764 qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64 765 " unaligned (L1 index: %#" PRIx64 ")", 766 l2_offset, l1_index); 767 return -EIO; 768 } 769 770 if (!(s->l1_table[l1_index] & QCOW_OFLAG_COPIED)) { 771 /* First allocate a new L2 table (and do COW if needed) */ 772 ret = l2_allocate(bs, l1_index); 773 if (ret < 0) { 774 return ret; 775 } 776 777 /* Then decrease the refcount of the old table */ 778 if (l2_offset) { 779 qcow2_free_clusters(bs, l2_offset, s->l2_size * l2_entry_size(s), 780 QCOW2_DISCARD_OTHER); 781 } 782 783 /* Get the offset of the newly-allocated l2 table */ 784 l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK; 785 assert(offset_into_cluster(s, l2_offset) == 0); 786 } 787 788 /* load the l2 slice in memory */ 789 ret = l2_load(bs, offset, l2_offset, &l2_slice); 790 if (ret < 0) { 791 return ret; 792 } 793 794 /* find the cluster offset for the given disk offset */ 795 796 l2_index = offset_to_l2_slice_index(s, offset); 797 798 *new_l2_slice = l2_slice; 799 *new_l2_index = l2_index; 800 801 return 0; 802 } 803 804 /* 805 * alloc_compressed_cluster_offset 806 * 807 * For a given offset on the virtual disk, allocate a new compressed cluster 808 * and put the host offset of the cluster into *host_offset. If a cluster is 809 * already allocated at the offset, return an error. 810 * 811 * Return 0 on success and -errno in error cases 812 */ 813 int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, 814 uint64_t offset, 815 int compressed_size, 816 uint64_t *host_offset) 817 { 818 BDRVQcow2State *s = bs->opaque; 819 int l2_index, ret; 820 uint64_t *l2_slice; 821 int64_t cluster_offset; 822 int nb_csectors; 823 824 if (has_data_file(bs)) { 825 return 0; 826 } 827 828 ret = get_cluster_table(bs, offset, &l2_slice, &l2_index); 829 if (ret < 0) { 830 return ret; 831 } 832 833 /* Compression can't overwrite anything. Fail if the cluster was already 834 * allocated. */ 835 cluster_offset = get_l2_entry(s, l2_slice, l2_index); 836 if (cluster_offset & L2E_OFFSET_MASK) { 837 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 838 return -EIO; 839 } 840 841 cluster_offset = qcow2_alloc_bytes(bs, compressed_size); 842 if (cluster_offset < 0) { 843 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 844 return cluster_offset; 845 } 846 847 nb_csectors = 848 (cluster_offset + compressed_size - 1) / QCOW2_COMPRESSED_SECTOR_SIZE - 849 (cluster_offset / QCOW2_COMPRESSED_SECTOR_SIZE); 850 851 /* The offset and size must fit in their fields of the L2 table entry */ 852 assert((cluster_offset & s->cluster_offset_mask) == cluster_offset); 853 assert((nb_csectors & s->csize_mask) == nb_csectors); 854 855 cluster_offset |= QCOW_OFLAG_COMPRESSED | 856 ((uint64_t)nb_csectors << s->csize_shift); 857 858 /* update L2 table */ 859 860 /* compressed clusters never have the copied flag */ 861 862 BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED); 863 qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice); 864 set_l2_entry(s, l2_slice, l2_index, cluster_offset); 865 if (has_subclusters(s)) { 866 set_l2_bitmap(s, l2_slice, l2_index, 0); 867 } 868 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 869 870 *host_offset = cluster_offset & s->cluster_offset_mask; 871 return 0; 872 } 873 874 static int perform_cow(BlockDriverState *bs, QCowL2Meta *m) 875 { 876 BDRVQcow2State *s = bs->opaque; 877 Qcow2COWRegion *start = &m->cow_start; 878 Qcow2COWRegion *end = &m->cow_end; 879 unsigned buffer_size; 880 unsigned data_bytes = end->offset - (start->offset + start->nb_bytes); 881 bool merge_reads; 882 uint8_t *start_buffer, *end_buffer; 883 QEMUIOVector qiov; 884 int ret; 885 886 assert(start->nb_bytes <= UINT_MAX - end->nb_bytes); 887 assert(start->nb_bytes + end->nb_bytes <= UINT_MAX - data_bytes); 888 assert(start->offset + start->nb_bytes <= end->offset); 889 890 if ((start->nb_bytes == 0 && end->nb_bytes == 0) || m->skip_cow) { 891 return 0; 892 } 893 894 /* If we have to read both the start and end COW regions and the 895 * middle region is not too large then perform just one read 896 * operation */ 897 merge_reads = start->nb_bytes && end->nb_bytes && data_bytes <= 16384; 898 if (merge_reads) { 899 buffer_size = start->nb_bytes + data_bytes + end->nb_bytes; 900 } else { 901 /* If we have to do two reads, add some padding in the middle 902 * if necessary to make sure that the end region is optimally 903 * aligned. */ 904 size_t align = bdrv_opt_mem_align(bs); 905 assert(align > 0 && align <= UINT_MAX); 906 assert(QEMU_ALIGN_UP(start->nb_bytes, align) <= 907 UINT_MAX - end->nb_bytes); 908 buffer_size = QEMU_ALIGN_UP(start->nb_bytes, align) + end->nb_bytes; 909 } 910 911 /* Reserve a buffer large enough to store all the data that we're 912 * going to read */ 913 start_buffer = qemu_try_blockalign(bs, buffer_size); 914 if (start_buffer == NULL) { 915 return -ENOMEM; 916 } 917 /* The part of the buffer where the end region is located */ 918 end_buffer = start_buffer + buffer_size - end->nb_bytes; 919 920 qemu_iovec_init(&qiov, 2 + (m->data_qiov ? 921 qemu_iovec_subvec_niov(m->data_qiov, 922 m->data_qiov_offset, 923 data_bytes) 924 : 0)); 925 926 qemu_co_mutex_unlock(&s->lock); 927 /* First we read the existing data from both COW regions. We 928 * either read the whole region in one go, or the start and end 929 * regions separately. */ 930 if (merge_reads) { 931 qemu_iovec_add(&qiov, start_buffer, buffer_size); 932 ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov); 933 } else { 934 qemu_iovec_add(&qiov, start_buffer, start->nb_bytes); 935 ret = do_perform_cow_read(bs, m->offset, start->offset, &qiov); 936 if (ret < 0) { 937 goto fail; 938 } 939 940 qemu_iovec_reset(&qiov); 941 qemu_iovec_add(&qiov, end_buffer, end->nb_bytes); 942 ret = do_perform_cow_read(bs, m->offset, end->offset, &qiov); 943 } 944 if (ret < 0) { 945 goto fail; 946 } 947 948 /* Encrypt the data if necessary before writing it */ 949 if (bs->encrypted) { 950 ret = qcow2_co_encrypt(bs, 951 m->alloc_offset + start->offset, 952 m->offset + start->offset, 953 start_buffer, start->nb_bytes); 954 if (ret < 0) { 955 goto fail; 956 } 957 958 ret = qcow2_co_encrypt(bs, 959 m->alloc_offset + end->offset, 960 m->offset + end->offset, 961 end_buffer, end->nb_bytes); 962 if (ret < 0) { 963 goto fail; 964 } 965 } 966 967 /* And now we can write everything. If we have the guest data we 968 * can write everything in one single operation */ 969 if (m->data_qiov) { 970 qemu_iovec_reset(&qiov); 971 if (start->nb_bytes) { 972 qemu_iovec_add(&qiov, start_buffer, start->nb_bytes); 973 } 974 qemu_iovec_concat(&qiov, m->data_qiov, m->data_qiov_offset, data_bytes); 975 if (end->nb_bytes) { 976 qemu_iovec_add(&qiov, end_buffer, end->nb_bytes); 977 } 978 /* NOTE: we have a write_aio blkdebug event here followed by 979 * a cow_write one in do_perform_cow_write(), but there's only 980 * one single I/O operation */ 981 BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); 982 ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov); 983 } else { 984 /* If there's no guest data then write both COW regions separately */ 985 qemu_iovec_reset(&qiov); 986 qemu_iovec_add(&qiov, start_buffer, start->nb_bytes); 987 ret = do_perform_cow_write(bs, m->alloc_offset, start->offset, &qiov); 988 if (ret < 0) { 989 goto fail; 990 } 991 992 qemu_iovec_reset(&qiov); 993 qemu_iovec_add(&qiov, end_buffer, end->nb_bytes); 994 ret = do_perform_cow_write(bs, m->alloc_offset, end->offset, &qiov); 995 } 996 997 fail: 998 qemu_co_mutex_lock(&s->lock); 999 1000 /* 1001 * Before we update the L2 table to actually point to the new cluster, we 1002 * need to be sure that the refcounts have been increased and COW was 1003 * handled. 1004 */ 1005 if (ret == 0) { 1006 qcow2_cache_depends_on_flush(s->l2_table_cache); 1007 } 1008 1009 qemu_vfree(start_buffer); 1010 qemu_iovec_destroy(&qiov); 1011 return ret; 1012 } 1013 1014 int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m) 1015 { 1016 BDRVQcow2State *s = bs->opaque; 1017 int i, j = 0, l2_index, ret; 1018 uint64_t *old_cluster, *l2_slice; 1019 uint64_t cluster_offset = m->alloc_offset; 1020 1021 trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters); 1022 assert(m->nb_clusters > 0); 1023 1024 old_cluster = g_try_new(uint64_t, m->nb_clusters); 1025 if (old_cluster == NULL) { 1026 ret = -ENOMEM; 1027 goto err; 1028 } 1029 1030 /* copy content of unmodified sectors */ 1031 ret = perform_cow(bs, m); 1032 if (ret < 0) { 1033 goto err; 1034 } 1035 1036 /* Update L2 table. */ 1037 if (s->use_lazy_refcounts) { 1038 qcow2_mark_dirty(bs); 1039 } 1040 if (qcow2_need_accurate_refcounts(s)) { 1041 qcow2_cache_set_dependency(bs, s->l2_table_cache, 1042 s->refcount_block_cache); 1043 } 1044 1045 ret = get_cluster_table(bs, m->offset, &l2_slice, &l2_index); 1046 if (ret < 0) { 1047 goto err; 1048 } 1049 qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice); 1050 1051 assert(l2_index + m->nb_clusters <= s->l2_slice_size); 1052 for (i = 0; i < m->nb_clusters; i++) { 1053 uint64_t offset = cluster_offset + ((uint64_t)i << s->cluster_bits); 1054 /* if two concurrent writes happen to the same unallocated cluster 1055 * each write allocates separate cluster and writes data concurrently. 1056 * The first one to complete updates l2 table with pointer to its 1057 * cluster the second one has to do RMW (which is done above by 1058 * perform_cow()), update l2 table with its cluster pointer and free 1059 * old cluster. This is what this loop does */ 1060 if (get_l2_entry(s, l2_slice, l2_index + i) != 0) { 1061 old_cluster[j++] = get_l2_entry(s, l2_slice, l2_index + i); 1062 } 1063 1064 /* The offset must fit in the offset field of the L2 table entry */ 1065 assert((offset & L2E_OFFSET_MASK) == offset); 1066 1067 set_l2_entry(s, l2_slice, l2_index + i, offset | QCOW_OFLAG_COPIED); 1068 1069 /* Update bitmap with the subclusters that were just written */ 1070 if (has_subclusters(s) && !m->prealloc) { 1071 uint64_t l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index + i); 1072 unsigned written_from = m->cow_start.offset; 1073 unsigned written_to = m->cow_end.offset + m->cow_end.nb_bytes ?: 1074 m->nb_clusters << s->cluster_bits; 1075 int first_sc, last_sc; 1076 /* Narrow written_from and written_to down to the current cluster */ 1077 written_from = MAX(written_from, i << s->cluster_bits); 1078 written_to = MIN(written_to, (i + 1) << s->cluster_bits); 1079 assert(written_from < written_to); 1080 first_sc = offset_to_sc_index(s, written_from); 1081 last_sc = offset_to_sc_index(s, written_to - 1); 1082 l2_bitmap |= QCOW_OFLAG_SUB_ALLOC_RANGE(first_sc, last_sc + 1); 1083 l2_bitmap &= ~QCOW_OFLAG_SUB_ZERO_RANGE(first_sc, last_sc + 1); 1084 set_l2_bitmap(s, l2_slice, l2_index + i, l2_bitmap); 1085 } 1086 } 1087 1088 1089 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 1090 1091 /* 1092 * If this was a COW, we need to decrease the refcount of the old cluster. 1093 * 1094 * Don't discard clusters that reach a refcount of 0 (e.g. compressed 1095 * clusters), the next write will reuse them anyway. 1096 */ 1097 if (!m->keep_old_clusters && j != 0) { 1098 for (i = 0; i < j; i++) { 1099 qcow2_free_any_cluster(bs, old_cluster[i], QCOW2_DISCARD_NEVER); 1100 } 1101 } 1102 1103 ret = 0; 1104 err: 1105 g_free(old_cluster); 1106 return ret; 1107 } 1108 1109 /** 1110 * Frees the allocated clusters because the request failed and they won't 1111 * actually be linked. 1112 */ 1113 void qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m) 1114 { 1115 BDRVQcow2State *s = bs->opaque; 1116 if (!has_data_file(bs) && !m->keep_old_clusters) { 1117 qcow2_free_clusters(bs, m->alloc_offset, 1118 m->nb_clusters << s->cluster_bits, 1119 QCOW2_DISCARD_NEVER); 1120 } 1121 } 1122 1123 /* 1124 * For a given write request, create a new QCowL2Meta structure, add 1125 * it to @m and the BDRVQcow2State.cluster_allocs list. If the write 1126 * request does not need copy-on-write or changes to the L2 metadata 1127 * then this function does nothing. 1128 * 1129 * @host_cluster_offset points to the beginning of the first cluster. 1130 * 1131 * @guest_offset and @bytes indicate the offset and length of the 1132 * request. 1133 * 1134 * @l2_slice contains the L2 entries of all clusters involved in this 1135 * write request. 1136 * 1137 * If @keep_old is true it means that the clusters were already 1138 * allocated and will be overwritten. If false then the clusters are 1139 * new and we have to decrease the reference count of the old ones. 1140 * 1141 * Returns 0 on success, -errno on failure. 1142 */ 1143 static int calculate_l2_meta(BlockDriverState *bs, uint64_t host_cluster_offset, 1144 uint64_t guest_offset, unsigned bytes, 1145 uint64_t *l2_slice, QCowL2Meta **m, bool keep_old) 1146 { 1147 BDRVQcow2State *s = bs->opaque; 1148 int sc_index, l2_index = offset_to_l2_slice_index(s, guest_offset); 1149 uint64_t l2_entry, l2_bitmap; 1150 unsigned cow_start_from, cow_end_to; 1151 unsigned cow_start_to = offset_into_cluster(s, guest_offset); 1152 unsigned cow_end_from = cow_start_to + bytes; 1153 unsigned nb_clusters = size_to_clusters(s, cow_end_from); 1154 QCowL2Meta *old_m = *m; 1155 QCow2SubclusterType type; 1156 int i; 1157 bool skip_cow = keep_old; 1158 1159 assert(nb_clusters <= s->l2_slice_size - l2_index); 1160 1161 /* Check the type of all affected subclusters */ 1162 for (i = 0; i < nb_clusters; i++) { 1163 l2_entry = get_l2_entry(s, l2_slice, l2_index + i); 1164 l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index + i); 1165 if (skip_cow) { 1166 unsigned write_from = MAX(cow_start_to, i << s->cluster_bits); 1167 unsigned write_to = MIN(cow_end_from, (i + 1) << s->cluster_bits); 1168 int first_sc = offset_to_sc_index(s, write_from); 1169 int last_sc = offset_to_sc_index(s, write_to - 1); 1170 int cnt = qcow2_get_subcluster_range_type(bs, l2_entry, l2_bitmap, 1171 first_sc, &type); 1172 /* Is any of the subclusters of type != QCOW2_SUBCLUSTER_NORMAL ? */ 1173 if (type != QCOW2_SUBCLUSTER_NORMAL || first_sc + cnt <= last_sc) { 1174 skip_cow = false; 1175 } 1176 } else { 1177 /* If we can't skip the cow we can still look for invalid entries */ 1178 type = qcow2_get_subcluster_type(bs, l2_entry, l2_bitmap, 0); 1179 } 1180 if (type == QCOW2_SUBCLUSTER_INVALID) { 1181 int l1_index = offset_to_l1_index(s, guest_offset); 1182 uint64_t l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK; 1183 qcow2_signal_corruption(bs, true, -1, -1, "Invalid cluster " 1184 "entry found (L2 offset: %#" PRIx64 1185 ", L2 index: %#x)", 1186 l2_offset, l2_index + i); 1187 return -EIO; 1188 } 1189 } 1190 1191 if (skip_cow) { 1192 return 0; 1193 } 1194 1195 /* Get the L2 entry of the first cluster */ 1196 l2_entry = get_l2_entry(s, l2_slice, l2_index); 1197 l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index); 1198 sc_index = offset_to_sc_index(s, guest_offset); 1199 type = qcow2_get_subcluster_type(bs, l2_entry, l2_bitmap, sc_index); 1200 1201 if (!keep_old) { 1202 switch (type) { 1203 case QCOW2_SUBCLUSTER_COMPRESSED: 1204 cow_start_from = 0; 1205 break; 1206 case QCOW2_SUBCLUSTER_NORMAL: 1207 case QCOW2_SUBCLUSTER_ZERO_ALLOC: 1208 case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC: 1209 if (has_subclusters(s)) { 1210 /* Skip all leading zero and unallocated subclusters */ 1211 uint32_t alloc_bitmap = l2_bitmap & QCOW_L2_BITMAP_ALL_ALLOC; 1212 cow_start_from = 1213 MIN(sc_index, ctz32(alloc_bitmap)) << s->subcluster_bits; 1214 } else { 1215 cow_start_from = 0; 1216 } 1217 break; 1218 case QCOW2_SUBCLUSTER_ZERO_PLAIN: 1219 case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN: 1220 cow_start_from = sc_index << s->subcluster_bits; 1221 break; 1222 default: 1223 g_assert_not_reached(); 1224 } 1225 } else { 1226 switch (type) { 1227 case QCOW2_SUBCLUSTER_NORMAL: 1228 cow_start_from = cow_start_to; 1229 break; 1230 case QCOW2_SUBCLUSTER_ZERO_ALLOC: 1231 case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC: 1232 cow_start_from = sc_index << s->subcluster_bits; 1233 break; 1234 default: 1235 g_assert_not_reached(); 1236 } 1237 } 1238 1239 /* Get the L2 entry of the last cluster */ 1240 l2_index += nb_clusters - 1; 1241 l2_entry = get_l2_entry(s, l2_slice, l2_index); 1242 l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index); 1243 sc_index = offset_to_sc_index(s, guest_offset + bytes - 1); 1244 type = qcow2_get_subcluster_type(bs, l2_entry, l2_bitmap, sc_index); 1245 1246 if (!keep_old) { 1247 switch (type) { 1248 case QCOW2_SUBCLUSTER_COMPRESSED: 1249 cow_end_to = ROUND_UP(cow_end_from, s->cluster_size); 1250 break; 1251 case QCOW2_SUBCLUSTER_NORMAL: 1252 case QCOW2_SUBCLUSTER_ZERO_ALLOC: 1253 case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC: 1254 cow_end_to = ROUND_UP(cow_end_from, s->cluster_size); 1255 if (has_subclusters(s)) { 1256 /* Skip all trailing zero and unallocated subclusters */ 1257 uint32_t alloc_bitmap = l2_bitmap & QCOW_L2_BITMAP_ALL_ALLOC; 1258 cow_end_to -= 1259 MIN(s->subclusters_per_cluster - sc_index - 1, 1260 clz32(alloc_bitmap)) << s->subcluster_bits; 1261 } 1262 break; 1263 case QCOW2_SUBCLUSTER_ZERO_PLAIN: 1264 case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN: 1265 cow_end_to = ROUND_UP(cow_end_from, s->subcluster_size); 1266 break; 1267 default: 1268 g_assert_not_reached(); 1269 } 1270 } else { 1271 switch (type) { 1272 case QCOW2_SUBCLUSTER_NORMAL: 1273 cow_end_to = cow_end_from; 1274 break; 1275 case QCOW2_SUBCLUSTER_ZERO_ALLOC: 1276 case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC: 1277 cow_end_to = ROUND_UP(cow_end_from, s->subcluster_size); 1278 break; 1279 default: 1280 g_assert_not_reached(); 1281 } 1282 } 1283 1284 *m = g_malloc0(sizeof(**m)); 1285 **m = (QCowL2Meta) { 1286 .next = old_m, 1287 1288 .alloc_offset = host_cluster_offset, 1289 .offset = start_of_cluster(s, guest_offset), 1290 .nb_clusters = nb_clusters, 1291 1292 .keep_old_clusters = keep_old, 1293 1294 .cow_start = { 1295 .offset = cow_start_from, 1296 .nb_bytes = cow_start_to - cow_start_from, 1297 }, 1298 .cow_end = { 1299 .offset = cow_end_from, 1300 .nb_bytes = cow_end_to - cow_end_from, 1301 }, 1302 }; 1303 1304 qemu_co_queue_init(&(*m)->dependent_requests); 1305 QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight); 1306 1307 return 0; 1308 } 1309 1310 /* 1311 * Returns true if writing to the cluster pointed to by @l2_entry 1312 * requires a new allocation (that is, if the cluster is unallocated 1313 * or has refcount > 1 and therefore cannot be written in-place). 1314 */ 1315 static bool cluster_needs_new_alloc(BlockDriverState *bs, uint64_t l2_entry) 1316 { 1317 switch (qcow2_get_cluster_type(bs, l2_entry)) { 1318 case QCOW2_CLUSTER_NORMAL: 1319 case QCOW2_CLUSTER_ZERO_ALLOC: 1320 if (l2_entry & QCOW_OFLAG_COPIED) { 1321 return false; 1322 } 1323 /* fallthrough */ 1324 case QCOW2_CLUSTER_UNALLOCATED: 1325 case QCOW2_CLUSTER_COMPRESSED: 1326 case QCOW2_CLUSTER_ZERO_PLAIN: 1327 return true; 1328 default: 1329 abort(); 1330 } 1331 } 1332 1333 /* 1334 * Returns the number of contiguous clusters that can be written to 1335 * using one single write request, starting from @l2_index. 1336 * At most @nb_clusters are checked. 1337 * 1338 * If @new_alloc is true this counts clusters that are either 1339 * unallocated, or allocated but with refcount > 1 (so they need to be 1340 * newly allocated and COWed). 1341 * 1342 * If @new_alloc is false this counts clusters that are already 1343 * allocated and can be overwritten in-place (this includes clusters 1344 * of type QCOW2_CLUSTER_ZERO_ALLOC). 1345 */ 1346 static int count_single_write_clusters(BlockDriverState *bs, int nb_clusters, 1347 uint64_t *l2_slice, int l2_index, 1348 bool new_alloc) 1349 { 1350 BDRVQcow2State *s = bs->opaque; 1351 uint64_t l2_entry = get_l2_entry(s, l2_slice, l2_index); 1352 uint64_t expected_offset = l2_entry & L2E_OFFSET_MASK; 1353 int i; 1354 1355 for (i = 0; i < nb_clusters; i++) { 1356 l2_entry = get_l2_entry(s, l2_slice, l2_index + i); 1357 if (cluster_needs_new_alloc(bs, l2_entry) != new_alloc) { 1358 break; 1359 } 1360 if (!new_alloc) { 1361 if (expected_offset != (l2_entry & L2E_OFFSET_MASK)) { 1362 break; 1363 } 1364 expected_offset += s->cluster_size; 1365 } 1366 } 1367 1368 assert(i <= nb_clusters); 1369 return i; 1370 } 1371 1372 /* 1373 * Check if there already is an AIO write request in flight which allocates 1374 * the same cluster. In this case we need to wait until the previous 1375 * request has completed and updated the L2 table accordingly. 1376 * 1377 * Returns: 1378 * 0 if there was no dependency. *cur_bytes indicates the number of 1379 * bytes from guest_offset that can be read before the next 1380 * dependency must be processed (or the request is complete) 1381 * 1382 * -EAGAIN if we had to wait for another request, previously gathered 1383 * information on cluster allocation may be invalid now. The caller 1384 * must start over anyway, so consider *cur_bytes undefined. 1385 */ 1386 static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset, 1387 uint64_t *cur_bytes, QCowL2Meta **m) 1388 { 1389 BDRVQcow2State *s = bs->opaque; 1390 QCowL2Meta *old_alloc; 1391 uint64_t bytes = *cur_bytes; 1392 1393 QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) { 1394 1395 uint64_t start = guest_offset; 1396 uint64_t end = start + bytes; 1397 uint64_t old_start = start_of_cluster(s, l2meta_cow_start(old_alloc)); 1398 uint64_t old_end = ROUND_UP(l2meta_cow_end(old_alloc), s->cluster_size); 1399 1400 if (end <= old_start || start >= old_end) { 1401 /* No intersection */ 1402 } else { 1403 if (start < old_start) { 1404 /* Stop at the start of a running allocation */ 1405 bytes = old_start - start; 1406 } else { 1407 bytes = 0; 1408 } 1409 1410 /* Stop if already an l2meta exists. After yielding, it wouldn't 1411 * be valid any more, so we'd have to clean up the old L2Metas 1412 * and deal with requests depending on them before starting to 1413 * gather new ones. Not worth the trouble. */ 1414 if (bytes == 0 && *m) { 1415 *cur_bytes = 0; 1416 return 0; 1417 } 1418 1419 if (bytes == 0) { 1420 /* Wait for the dependency to complete. We need to recheck 1421 * the free/allocated clusters when we continue. */ 1422 qemu_co_queue_wait(&old_alloc->dependent_requests, &s->lock); 1423 return -EAGAIN; 1424 } 1425 } 1426 } 1427 1428 /* Make sure that existing clusters and new allocations are only used up to 1429 * the next dependency if we shortened the request above */ 1430 *cur_bytes = bytes; 1431 1432 return 0; 1433 } 1434 1435 /* 1436 * Checks how many already allocated clusters that don't require a new 1437 * allocation there are at the given guest_offset (up to *bytes). 1438 * If *host_offset is not INV_OFFSET, only physically contiguous clusters 1439 * beginning at this host offset are counted. 1440 * 1441 * Note that guest_offset may not be cluster aligned. In this case, the 1442 * returned *host_offset points to exact byte referenced by guest_offset and 1443 * therefore isn't cluster aligned as well. 1444 * 1445 * Returns: 1446 * 0: if no allocated clusters are available at the given offset. 1447 * *bytes is normally unchanged. It is set to 0 if the cluster 1448 * is allocated and can be overwritten in-place but doesn't have 1449 * the right physical offset. 1450 * 1451 * 1: if allocated clusters that can be overwritten in place are 1452 * available at the requested offset. *bytes may have decreased 1453 * and describes the length of the area that can be written to. 1454 * 1455 * -errno: in error cases 1456 */ 1457 static int handle_copied(BlockDriverState *bs, uint64_t guest_offset, 1458 uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m) 1459 { 1460 BDRVQcow2State *s = bs->opaque; 1461 int l2_index; 1462 uint64_t l2_entry, cluster_offset; 1463 uint64_t *l2_slice; 1464 uint64_t nb_clusters; 1465 unsigned int keep_clusters; 1466 int ret; 1467 1468 trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset, 1469 *bytes); 1470 1471 assert(*host_offset == INV_OFFSET || offset_into_cluster(s, guest_offset) 1472 == offset_into_cluster(s, *host_offset)); 1473 1474 /* 1475 * Calculate the number of clusters to look for. We stop at L2 slice 1476 * boundaries to keep things simple. 1477 */ 1478 nb_clusters = 1479 size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes); 1480 1481 l2_index = offset_to_l2_slice_index(s, guest_offset); 1482 nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index); 1483 /* Limit total byte count to BDRV_REQUEST_MAX_BYTES */ 1484 nb_clusters = MIN(nb_clusters, BDRV_REQUEST_MAX_BYTES >> s->cluster_bits); 1485 1486 /* Find L2 entry for the first involved cluster */ 1487 ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index); 1488 if (ret < 0) { 1489 return ret; 1490 } 1491 1492 l2_entry = get_l2_entry(s, l2_slice, l2_index); 1493 cluster_offset = l2_entry & L2E_OFFSET_MASK; 1494 1495 if (!cluster_needs_new_alloc(bs, l2_entry)) { 1496 if (offset_into_cluster(s, cluster_offset)) { 1497 qcow2_signal_corruption(bs, true, -1, -1, "%s cluster offset " 1498 "%#" PRIx64 " unaligned (guest offset: %#" 1499 PRIx64 ")", l2_entry & QCOW_OFLAG_ZERO ? 1500 "Preallocated zero" : "Data", 1501 cluster_offset, guest_offset); 1502 ret = -EIO; 1503 goto out; 1504 } 1505 1506 /* If a specific host_offset is required, check it */ 1507 if (*host_offset != INV_OFFSET && cluster_offset != *host_offset) { 1508 *bytes = 0; 1509 ret = 0; 1510 goto out; 1511 } 1512 1513 /* We keep all QCOW_OFLAG_COPIED clusters */ 1514 keep_clusters = count_single_write_clusters(bs, nb_clusters, l2_slice, 1515 l2_index, false); 1516 assert(keep_clusters <= nb_clusters); 1517 1518 *bytes = MIN(*bytes, 1519 keep_clusters * s->cluster_size 1520 - offset_into_cluster(s, guest_offset)); 1521 assert(*bytes != 0); 1522 1523 ret = calculate_l2_meta(bs, cluster_offset, guest_offset, 1524 *bytes, l2_slice, m, true); 1525 if (ret < 0) { 1526 goto out; 1527 } 1528 1529 ret = 1; 1530 } else { 1531 ret = 0; 1532 } 1533 1534 /* Cleanup */ 1535 out: 1536 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 1537 1538 /* Only return a host offset if we actually made progress. Otherwise we 1539 * would make requirements for handle_alloc() that it can't fulfill */ 1540 if (ret > 0) { 1541 *host_offset = cluster_offset + offset_into_cluster(s, guest_offset); 1542 } 1543 1544 return ret; 1545 } 1546 1547 /* 1548 * Allocates new clusters for the given guest_offset. 1549 * 1550 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to 1551 * contain the number of clusters that have been allocated and are contiguous 1552 * in the image file. 1553 * 1554 * If *host_offset is not INV_OFFSET, it specifies the offset in the image file 1555 * at which the new clusters must start. *nb_clusters can be 0 on return in 1556 * this case if the cluster at host_offset is already in use. If *host_offset 1557 * is INV_OFFSET, the clusters can be allocated anywhere in the image file. 1558 * 1559 * *host_offset is updated to contain the offset into the image file at which 1560 * the first allocated cluster starts. 1561 * 1562 * Return 0 on success and -errno in error cases. -EAGAIN means that the 1563 * function has been waiting for another request and the allocation must be 1564 * restarted, but the whole request should not be failed. 1565 */ 1566 static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset, 1567 uint64_t *host_offset, uint64_t *nb_clusters) 1568 { 1569 BDRVQcow2State *s = bs->opaque; 1570 1571 trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset, 1572 *host_offset, *nb_clusters); 1573 1574 if (has_data_file(bs)) { 1575 assert(*host_offset == INV_OFFSET || 1576 *host_offset == start_of_cluster(s, guest_offset)); 1577 *host_offset = start_of_cluster(s, guest_offset); 1578 return 0; 1579 } 1580 1581 /* Allocate new clusters */ 1582 trace_qcow2_cluster_alloc_phys(qemu_coroutine_self()); 1583 if (*host_offset == INV_OFFSET) { 1584 int64_t cluster_offset = 1585 qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size); 1586 if (cluster_offset < 0) { 1587 return cluster_offset; 1588 } 1589 *host_offset = cluster_offset; 1590 return 0; 1591 } else { 1592 int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters); 1593 if (ret < 0) { 1594 return ret; 1595 } 1596 *nb_clusters = ret; 1597 return 0; 1598 } 1599 } 1600 1601 /* 1602 * Allocates new clusters for an area that is either still unallocated or 1603 * cannot be overwritten in-place. If *host_offset is not INV_OFFSET, 1604 * clusters are only allocated if the new allocation can match the specified 1605 * host offset. 1606 * 1607 * Note that guest_offset may not be cluster aligned. In this case, the 1608 * returned *host_offset points to exact byte referenced by guest_offset and 1609 * therefore isn't cluster aligned as well. 1610 * 1611 * Returns: 1612 * 0: if no clusters could be allocated. *bytes is set to 0, 1613 * *host_offset is left unchanged. 1614 * 1615 * 1: if new clusters were allocated. *bytes may be decreased if the 1616 * new allocation doesn't cover all of the requested area. 1617 * *host_offset is updated to contain the host offset of the first 1618 * newly allocated cluster. 1619 * 1620 * -errno: in error cases 1621 */ 1622 static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset, 1623 uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m) 1624 { 1625 BDRVQcow2State *s = bs->opaque; 1626 int l2_index; 1627 uint64_t *l2_slice; 1628 uint64_t nb_clusters; 1629 int ret; 1630 1631 uint64_t alloc_cluster_offset; 1632 1633 trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset, 1634 *bytes); 1635 assert(*bytes > 0); 1636 1637 /* 1638 * Calculate the number of clusters to look for. We stop at L2 slice 1639 * boundaries to keep things simple. 1640 */ 1641 nb_clusters = 1642 size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes); 1643 1644 l2_index = offset_to_l2_slice_index(s, guest_offset); 1645 nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index); 1646 /* Limit total allocation byte count to BDRV_REQUEST_MAX_BYTES */ 1647 nb_clusters = MIN(nb_clusters, BDRV_REQUEST_MAX_BYTES >> s->cluster_bits); 1648 1649 /* Find L2 entry for the first involved cluster */ 1650 ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index); 1651 if (ret < 0) { 1652 return ret; 1653 } 1654 1655 nb_clusters = count_single_write_clusters(bs, nb_clusters, 1656 l2_slice, l2_index, true); 1657 1658 /* This function is only called when there were no non-COW clusters, so if 1659 * we can't find any unallocated or COW clusters either, something is 1660 * wrong with our code. */ 1661 assert(nb_clusters > 0); 1662 1663 /* Allocate at a given offset in the image file */ 1664 alloc_cluster_offset = *host_offset == INV_OFFSET ? INV_OFFSET : 1665 start_of_cluster(s, *host_offset); 1666 ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset, 1667 &nb_clusters); 1668 if (ret < 0) { 1669 goto out; 1670 } 1671 1672 /* Can't extend contiguous allocation */ 1673 if (nb_clusters == 0) { 1674 *bytes = 0; 1675 ret = 0; 1676 goto out; 1677 } 1678 1679 assert(alloc_cluster_offset != INV_OFFSET); 1680 1681 /* 1682 * Save info needed for meta data update. 1683 * 1684 * requested_bytes: Number of bytes from the start of the first 1685 * newly allocated cluster to the end of the (possibly shortened 1686 * before) write request. 1687 * 1688 * avail_bytes: Number of bytes from the start of the first 1689 * newly allocated to the end of the last newly allocated cluster. 1690 * 1691 * nb_bytes: The number of bytes from the start of the first 1692 * newly allocated cluster to the end of the area that the write 1693 * request actually writes to (excluding COW at the end) 1694 */ 1695 uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset); 1696 int avail_bytes = nb_clusters << s->cluster_bits; 1697 int nb_bytes = MIN(requested_bytes, avail_bytes); 1698 1699 *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset); 1700 *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset)); 1701 assert(*bytes != 0); 1702 1703 ret = calculate_l2_meta(bs, alloc_cluster_offset, guest_offset, *bytes, 1704 l2_slice, m, false); 1705 if (ret < 0) { 1706 goto out; 1707 } 1708 1709 ret = 1; 1710 1711 out: 1712 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 1713 return ret; 1714 } 1715 1716 /* 1717 * For a given area on the virtual disk defined by @offset and @bytes, 1718 * find the corresponding area on the qcow2 image, allocating new 1719 * clusters (or subclusters) if necessary. The result can span a 1720 * combination of allocated and previously unallocated clusters. 1721 * 1722 * Note that offset may not be cluster aligned. In this case, the returned 1723 * *host_offset points to exact byte referenced by offset and therefore 1724 * isn't cluster aligned as well. 1725 * 1726 * On return, @host_offset is set to the beginning of the requested 1727 * area. This area is guaranteed to be contiguous on the qcow2 file 1728 * but it can be smaller than initially requested. In this case @bytes 1729 * is updated with the actual size. 1730 * 1731 * If any clusters or subclusters were allocated then @m contains a 1732 * list with the information of all the affected regions. Note that 1733 * this can happen regardless of whether this function succeeds or 1734 * not. The caller is responsible for updating the L2 metadata of the 1735 * allocated clusters (on success) or freeing them (on failure), and 1736 * for clearing the contents of @m afterwards in both cases. 1737 * 1738 * If the request conflicts with another write request in flight, the coroutine 1739 * is queued and will be reentered when the dependency has completed. 1740 * 1741 * Return 0 on success and -errno in error cases 1742 */ 1743 int qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset, 1744 unsigned int *bytes, uint64_t *host_offset, 1745 QCowL2Meta **m) 1746 { 1747 BDRVQcow2State *s = bs->opaque; 1748 uint64_t start, remaining; 1749 uint64_t cluster_offset; 1750 uint64_t cur_bytes; 1751 int ret; 1752 1753 trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes); 1754 1755 again: 1756 start = offset; 1757 remaining = *bytes; 1758 cluster_offset = INV_OFFSET; 1759 *host_offset = INV_OFFSET; 1760 cur_bytes = 0; 1761 *m = NULL; 1762 1763 while (true) { 1764 1765 if (*host_offset == INV_OFFSET && cluster_offset != INV_OFFSET) { 1766 *host_offset = cluster_offset; 1767 } 1768 1769 assert(remaining >= cur_bytes); 1770 1771 start += cur_bytes; 1772 remaining -= cur_bytes; 1773 1774 if (cluster_offset != INV_OFFSET) { 1775 cluster_offset += cur_bytes; 1776 } 1777 1778 if (remaining == 0) { 1779 break; 1780 } 1781 1782 cur_bytes = remaining; 1783 1784 /* 1785 * Now start gathering as many contiguous clusters as possible: 1786 * 1787 * 1. Check for overlaps with in-flight allocations 1788 * 1789 * a) Overlap not in the first cluster -> shorten this request and 1790 * let the caller handle the rest in its next loop iteration. 1791 * 1792 * b) Real overlaps of two requests. Yield and restart the search 1793 * for contiguous clusters (the situation could have changed 1794 * while we were sleeping) 1795 * 1796 * c) TODO: Request starts in the same cluster as the in-flight 1797 * allocation ends. Shorten the COW of the in-fight allocation, 1798 * set cluster_offset to write to the same cluster and set up 1799 * the right synchronisation between the in-flight request and 1800 * the new one. 1801 */ 1802 ret = handle_dependencies(bs, start, &cur_bytes, m); 1803 if (ret == -EAGAIN) { 1804 /* Currently handle_dependencies() doesn't yield if we already had 1805 * an allocation. If it did, we would have to clean up the L2Meta 1806 * structs before starting over. */ 1807 assert(*m == NULL); 1808 goto again; 1809 } else if (ret < 0) { 1810 return ret; 1811 } else if (cur_bytes == 0) { 1812 break; 1813 } else { 1814 /* handle_dependencies() may have decreased cur_bytes (shortened 1815 * the allocations below) so that the next dependency is processed 1816 * correctly during the next loop iteration. */ 1817 } 1818 1819 /* 1820 * 2. Count contiguous COPIED clusters. 1821 */ 1822 ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m); 1823 if (ret < 0) { 1824 return ret; 1825 } else if (ret) { 1826 continue; 1827 } else if (cur_bytes == 0) { 1828 break; 1829 } 1830 1831 /* 1832 * 3. If the request still hasn't completed, allocate new clusters, 1833 * considering any cluster_offset of steps 1c or 2. 1834 */ 1835 ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m); 1836 if (ret < 0) { 1837 return ret; 1838 } else if (ret) { 1839 continue; 1840 } else { 1841 assert(cur_bytes == 0); 1842 break; 1843 } 1844 } 1845 1846 *bytes -= remaining; 1847 assert(*bytes > 0); 1848 assert(*host_offset != INV_OFFSET); 1849 assert(offset_into_cluster(s, *host_offset) == 1850 offset_into_cluster(s, offset)); 1851 1852 return 0; 1853 } 1854 1855 /* 1856 * This discards as many clusters of nb_clusters as possible at once (i.e. 1857 * all clusters in the same L2 slice) and returns the number of discarded 1858 * clusters. 1859 */ 1860 static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset, 1861 uint64_t nb_clusters, 1862 enum qcow2_discard_type type, bool full_discard) 1863 { 1864 BDRVQcow2State *s = bs->opaque; 1865 uint64_t *l2_slice; 1866 int l2_index; 1867 int ret; 1868 int i; 1869 1870 ret = get_cluster_table(bs, offset, &l2_slice, &l2_index); 1871 if (ret < 0) { 1872 return ret; 1873 } 1874 1875 /* Limit nb_clusters to one L2 slice */ 1876 nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index); 1877 assert(nb_clusters <= INT_MAX); 1878 1879 for (i = 0; i < nb_clusters; i++) { 1880 uint64_t old_l2_entry = get_l2_entry(s, l2_slice, l2_index + i); 1881 uint64_t old_l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index + i); 1882 uint64_t new_l2_entry = old_l2_entry; 1883 uint64_t new_l2_bitmap = old_l2_bitmap; 1884 QCow2ClusterType cluster_type = 1885 qcow2_get_cluster_type(bs, old_l2_entry); 1886 1887 /* 1888 * If full_discard is true, the cluster should not read back as zeroes, 1889 * but rather fall through to the backing file. 1890 * 1891 * If full_discard is false, make sure that a discarded area reads back 1892 * as zeroes for v3 images (we cannot do it for v2 without actually 1893 * writing a zero-filled buffer). We can skip the operation if the 1894 * cluster is already marked as zero, or if it's unallocated and we 1895 * don't have a backing file. 1896 * 1897 * TODO We might want to use bdrv_block_status(bs) here, but we're 1898 * holding s->lock, so that doesn't work today. 1899 */ 1900 if (full_discard) { 1901 new_l2_entry = new_l2_bitmap = 0; 1902 } else if (bs->backing || qcow2_cluster_is_allocated(cluster_type)) { 1903 if (has_subclusters(s)) { 1904 new_l2_entry = 0; 1905 new_l2_bitmap = QCOW_L2_BITMAP_ALL_ZEROES; 1906 } else { 1907 new_l2_entry = s->qcow_version >= 3 ? QCOW_OFLAG_ZERO : 0; 1908 } 1909 } 1910 1911 if (old_l2_entry == new_l2_entry && old_l2_bitmap == new_l2_bitmap) { 1912 continue; 1913 } 1914 1915 /* First remove L2 entries */ 1916 qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice); 1917 set_l2_entry(s, l2_slice, l2_index + i, new_l2_entry); 1918 if (has_subclusters(s)) { 1919 set_l2_bitmap(s, l2_slice, l2_index + i, new_l2_bitmap); 1920 } 1921 /* Then decrease the refcount */ 1922 qcow2_free_any_cluster(bs, old_l2_entry, type); 1923 } 1924 1925 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 1926 1927 return nb_clusters; 1928 } 1929 1930 int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset, 1931 uint64_t bytes, enum qcow2_discard_type type, 1932 bool full_discard) 1933 { 1934 BDRVQcow2State *s = bs->opaque; 1935 uint64_t end_offset = offset + bytes; 1936 uint64_t nb_clusters; 1937 int64_t cleared; 1938 int ret; 1939 1940 /* Caller must pass aligned values, except at image end */ 1941 assert(QEMU_IS_ALIGNED(offset, s->cluster_size)); 1942 assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) || 1943 end_offset == bs->total_sectors << BDRV_SECTOR_BITS); 1944 1945 nb_clusters = size_to_clusters(s, bytes); 1946 1947 s->cache_discards = true; 1948 1949 /* Each L2 slice is handled by its own loop iteration */ 1950 while (nb_clusters > 0) { 1951 cleared = discard_in_l2_slice(bs, offset, nb_clusters, type, 1952 full_discard); 1953 if (cleared < 0) { 1954 ret = cleared; 1955 goto fail; 1956 } 1957 1958 nb_clusters -= cleared; 1959 offset += (cleared * s->cluster_size); 1960 } 1961 1962 ret = 0; 1963 fail: 1964 s->cache_discards = false; 1965 qcow2_process_discards(bs, ret); 1966 1967 return ret; 1968 } 1969 1970 /* 1971 * This zeroes as many clusters of nb_clusters as possible at once (i.e. 1972 * all clusters in the same L2 slice) and returns the number of zeroed 1973 * clusters. 1974 */ 1975 static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset, 1976 uint64_t nb_clusters, int flags) 1977 { 1978 BDRVQcow2State *s = bs->opaque; 1979 uint64_t *l2_slice; 1980 int l2_index; 1981 int ret; 1982 int i; 1983 1984 ret = get_cluster_table(bs, offset, &l2_slice, &l2_index); 1985 if (ret < 0) { 1986 return ret; 1987 } 1988 1989 /* Limit nb_clusters to one L2 slice */ 1990 nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index); 1991 assert(nb_clusters <= INT_MAX); 1992 1993 for (i = 0; i < nb_clusters; i++) { 1994 uint64_t old_l2_entry = get_l2_entry(s, l2_slice, l2_index + i); 1995 uint64_t old_l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index + i); 1996 QCow2ClusterType type = qcow2_get_cluster_type(bs, old_l2_entry); 1997 bool unmap = (type == QCOW2_CLUSTER_COMPRESSED) || 1998 ((flags & BDRV_REQ_MAY_UNMAP) && qcow2_cluster_is_allocated(type)); 1999 uint64_t new_l2_entry = unmap ? 0 : old_l2_entry; 2000 uint64_t new_l2_bitmap = old_l2_bitmap; 2001 2002 if (has_subclusters(s)) { 2003 new_l2_bitmap = QCOW_L2_BITMAP_ALL_ZEROES; 2004 } else { 2005 new_l2_entry |= QCOW_OFLAG_ZERO; 2006 } 2007 2008 if (old_l2_entry == new_l2_entry && old_l2_bitmap == new_l2_bitmap) { 2009 continue; 2010 } 2011 2012 qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice); 2013 if (unmap) { 2014 qcow2_free_any_cluster(bs, old_l2_entry, QCOW2_DISCARD_REQUEST); 2015 } 2016 set_l2_entry(s, l2_slice, l2_index + i, new_l2_entry); 2017 if (has_subclusters(s)) { 2018 set_l2_bitmap(s, l2_slice, l2_index + i, new_l2_bitmap); 2019 } 2020 } 2021 2022 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 2023 2024 return nb_clusters; 2025 } 2026 2027 static int zero_l2_subclusters(BlockDriverState *bs, uint64_t offset, 2028 unsigned nb_subclusters) 2029 { 2030 BDRVQcow2State *s = bs->opaque; 2031 uint64_t *l2_slice; 2032 uint64_t old_l2_bitmap, l2_bitmap; 2033 int l2_index, ret, sc = offset_to_sc_index(s, offset); 2034 2035 /* For full clusters use zero_in_l2_slice() instead */ 2036 assert(nb_subclusters > 0 && nb_subclusters < s->subclusters_per_cluster); 2037 assert(sc + nb_subclusters <= s->subclusters_per_cluster); 2038 assert(offset_into_subcluster(s, offset) == 0); 2039 2040 ret = get_cluster_table(bs, offset, &l2_slice, &l2_index); 2041 if (ret < 0) { 2042 return ret; 2043 } 2044 2045 switch (qcow2_get_cluster_type(bs, get_l2_entry(s, l2_slice, l2_index))) { 2046 case QCOW2_CLUSTER_COMPRESSED: 2047 ret = -ENOTSUP; /* We cannot partially zeroize compressed clusters */ 2048 goto out; 2049 case QCOW2_CLUSTER_NORMAL: 2050 case QCOW2_CLUSTER_UNALLOCATED: 2051 break; 2052 default: 2053 g_assert_not_reached(); 2054 } 2055 2056 old_l2_bitmap = l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index); 2057 2058 l2_bitmap |= QCOW_OFLAG_SUB_ZERO_RANGE(sc, sc + nb_subclusters); 2059 l2_bitmap &= ~QCOW_OFLAG_SUB_ALLOC_RANGE(sc, sc + nb_subclusters); 2060 2061 if (old_l2_bitmap != l2_bitmap) { 2062 set_l2_bitmap(s, l2_slice, l2_index, l2_bitmap); 2063 qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice); 2064 } 2065 2066 ret = 0; 2067 out: 2068 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 2069 2070 return ret; 2071 } 2072 2073 int qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset, 2074 uint64_t bytes, int flags) 2075 { 2076 BDRVQcow2State *s = bs->opaque; 2077 uint64_t end_offset = offset + bytes; 2078 uint64_t nb_clusters; 2079 unsigned head, tail; 2080 int64_t cleared; 2081 int ret; 2082 2083 /* If we have to stay in sync with an external data file, zero out 2084 * s->data_file first. */ 2085 if (data_file_is_raw(bs)) { 2086 assert(has_data_file(bs)); 2087 ret = bdrv_co_pwrite_zeroes(s->data_file, offset, bytes, flags); 2088 if (ret < 0) { 2089 return ret; 2090 } 2091 } 2092 2093 /* Caller must pass aligned values, except at image end */ 2094 assert(offset_into_subcluster(s, offset) == 0); 2095 assert(offset_into_subcluster(s, end_offset) == 0 || 2096 end_offset >= bs->total_sectors << BDRV_SECTOR_BITS); 2097 2098 /* 2099 * The zero flag is only supported by version 3 and newer. However, if we 2100 * have no backing file, we can resort to discard in version 2. 2101 */ 2102 if (s->qcow_version < 3) { 2103 if (!bs->backing) { 2104 return qcow2_cluster_discard(bs, offset, bytes, 2105 QCOW2_DISCARD_REQUEST, false); 2106 } 2107 return -ENOTSUP; 2108 } 2109 2110 head = MIN(end_offset, ROUND_UP(offset, s->cluster_size)) - offset; 2111 offset += head; 2112 2113 tail = (end_offset >= bs->total_sectors << BDRV_SECTOR_BITS) ? 0 : 2114 end_offset - MAX(offset, start_of_cluster(s, end_offset)); 2115 end_offset -= tail; 2116 2117 s->cache_discards = true; 2118 2119 if (head) { 2120 ret = zero_l2_subclusters(bs, offset - head, 2121 size_to_subclusters(s, head)); 2122 if (ret < 0) { 2123 goto fail; 2124 } 2125 } 2126 2127 /* Each L2 slice is handled by its own loop iteration */ 2128 nb_clusters = size_to_clusters(s, end_offset - offset); 2129 2130 while (nb_clusters > 0) { 2131 cleared = zero_in_l2_slice(bs, offset, nb_clusters, flags); 2132 if (cleared < 0) { 2133 ret = cleared; 2134 goto fail; 2135 } 2136 2137 nb_clusters -= cleared; 2138 offset += (cleared * s->cluster_size); 2139 } 2140 2141 if (tail) { 2142 ret = zero_l2_subclusters(bs, end_offset, size_to_subclusters(s, tail)); 2143 if (ret < 0) { 2144 goto fail; 2145 } 2146 } 2147 2148 ret = 0; 2149 fail: 2150 s->cache_discards = false; 2151 qcow2_process_discards(bs, ret); 2152 2153 return ret; 2154 } 2155 2156 /* 2157 * Expands all zero clusters in a specific L1 table (or deallocates them, for 2158 * non-backed non-pre-allocated zero clusters). 2159 * 2160 * l1_entries and *visited_l1_entries are used to keep track of progress for 2161 * status_cb(). l1_entries contains the total number of L1 entries and 2162 * *visited_l1_entries counts all visited L1 entries. 2163 */ 2164 static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table, 2165 int l1_size, int64_t *visited_l1_entries, 2166 int64_t l1_entries, 2167 BlockDriverAmendStatusCB *status_cb, 2168 void *cb_opaque) 2169 { 2170 BDRVQcow2State *s = bs->opaque; 2171 bool is_active_l1 = (l1_table == s->l1_table); 2172 uint64_t *l2_slice = NULL; 2173 unsigned slice, slice_size2, n_slices; 2174 int ret; 2175 int i, j; 2176 2177 /* qcow2_downgrade() is not allowed in images with subclusters */ 2178 assert(!has_subclusters(s)); 2179 2180 slice_size2 = s->l2_slice_size * l2_entry_size(s); 2181 n_slices = s->cluster_size / slice_size2; 2182 2183 if (!is_active_l1) { 2184 /* inactive L2 tables require a buffer to be stored in when loading 2185 * them from disk */ 2186 l2_slice = qemu_try_blockalign(bs->file->bs, slice_size2); 2187 if (l2_slice == NULL) { 2188 return -ENOMEM; 2189 } 2190 } 2191 2192 for (i = 0; i < l1_size; i++) { 2193 uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK; 2194 uint64_t l2_refcount; 2195 2196 if (!l2_offset) { 2197 /* unallocated */ 2198 (*visited_l1_entries)++; 2199 if (status_cb) { 2200 status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque); 2201 } 2202 continue; 2203 } 2204 2205 if (offset_into_cluster(s, l2_offset)) { 2206 qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" 2207 PRIx64 " unaligned (L1 index: %#x)", 2208 l2_offset, i); 2209 ret = -EIO; 2210 goto fail; 2211 } 2212 2213 ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits, 2214 &l2_refcount); 2215 if (ret < 0) { 2216 goto fail; 2217 } 2218 2219 for (slice = 0; slice < n_slices; slice++) { 2220 uint64_t slice_offset = l2_offset + slice * slice_size2; 2221 bool l2_dirty = false; 2222 if (is_active_l1) { 2223 /* get active L2 tables from cache */ 2224 ret = qcow2_cache_get(bs, s->l2_table_cache, slice_offset, 2225 (void **)&l2_slice); 2226 } else { 2227 /* load inactive L2 tables from disk */ 2228 ret = bdrv_pread(bs->file, slice_offset, l2_slice, slice_size2); 2229 } 2230 if (ret < 0) { 2231 goto fail; 2232 } 2233 2234 for (j = 0; j < s->l2_slice_size; j++) { 2235 uint64_t l2_entry = get_l2_entry(s, l2_slice, j); 2236 int64_t offset = l2_entry & L2E_OFFSET_MASK; 2237 QCow2ClusterType cluster_type = 2238 qcow2_get_cluster_type(bs, l2_entry); 2239 2240 if (cluster_type != QCOW2_CLUSTER_ZERO_PLAIN && 2241 cluster_type != QCOW2_CLUSTER_ZERO_ALLOC) { 2242 continue; 2243 } 2244 2245 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) { 2246 if (!bs->backing) { 2247 /* 2248 * not backed; therefore we can simply deallocate the 2249 * cluster. No need to call set_l2_bitmap(), this 2250 * function doesn't support images with subclusters. 2251 */ 2252 set_l2_entry(s, l2_slice, j, 0); 2253 l2_dirty = true; 2254 continue; 2255 } 2256 2257 offset = qcow2_alloc_clusters(bs, s->cluster_size); 2258 if (offset < 0) { 2259 ret = offset; 2260 goto fail; 2261 } 2262 2263 /* The offset must fit in the offset field */ 2264 assert((offset & L2E_OFFSET_MASK) == offset); 2265 2266 if (l2_refcount > 1) { 2267 /* For shared L2 tables, set the refcount accordingly 2268 * (it is already 1 and needs to be l2_refcount) */ 2269 ret = qcow2_update_cluster_refcount( 2270 bs, offset >> s->cluster_bits, 2271 refcount_diff(1, l2_refcount), false, 2272 QCOW2_DISCARD_OTHER); 2273 if (ret < 0) { 2274 qcow2_free_clusters(bs, offset, s->cluster_size, 2275 QCOW2_DISCARD_OTHER); 2276 goto fail; 2277 } 2278 } 2279 } 2280 2281 if (offset_into_cluster(s, offset)) { 2282 int l2_index = slice * s->l2_slice_size + j; 2283 qcow2_signal_corruption( 2284 bs, true, -1, -1, 2285 "Cluster allocation offset " 2286 "%#" PRIx64 " unaligned (L2 offset: %#" 2287 PRIx64 ", L2 index: %#x)", offset, 2288 l2_offset, l2_index); 2289 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) { 2290 qcow2_free_clusters(bs, offset, s->cluster_size, 2291 QCOW2_DISCARD_ALWAYS); 2292 } 2293 ret = -EIO; 2294 goto fail; 2295 } 2296 2297 ret = qcow2_pre_write_overlap_check(bs, 0, offset, 2298 s->cluster_size, true); 2299 if (ret < 0) { 2300 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) { 2301 qcow2_free_clusters(bs, offset, s->cluster_size, 2302 QCOW2_DISCARD_ALWAYS); 2303 } 2304 goto fail; 2305 } 2306 2307 ret = bdrv_pwrite_zeroes(s->data_file, offset, 2308 s->cluster_size, 0); 2309 if (ret < 0) { 2310 if (cluster_type == QCOW2_CLUSTER_ZERO_PLAIN) { 2311 qcow2_free_clusters(bs, offset, s->cluster_size, 2312 QCOW2_DISCARD_ALWAYS); 2313 } 2314 goto fail; 2315 } 2316 2317 if (l2_refcount == 1) { 2318 set_l2_entry(s, l2_slice, j, offset | QCOW_OFLAG_COPIED); 2319 } else { 2320 set_l2_entry(s, l2_slice, j, offset); 2321 } 2322 /* 2323 * No need to call set_l2_bitmap() after set_l2_entry() because 2324 * this function doesn't support images with subclusters. 2325 */ 2326 l2_dirty = true; 2327 } 2328 2329 if (is_active_l1) { 2330 if (l2_dirty) { 2331 qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice); 2332 qcow2_cache_depends_on_flush(s->l2_table_cache); 2333 } 2334 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 2335 } else { 2336 if (l2_dirty) { 2337 ret = qcow2_pre_write_overlap_check( 2338 bs, QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, 2339 slice_offset, slice_size2, false); 2340 if (ret < 0) { 2341 goto fail; 2342 } 2343 2344 ret = bdrv_pwrite(bs->file, slice_offset, 2345 l2_slice, slice_size2); 2346 if (ret < 0) { 2347 goto fail; 2348 } 2349 } 2350 } 2351 } 2352 2353 (*visited_l1_entries)++; 2354 if (status_cb) { 2355 status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque); 2356 } 2357 } 2358 2359 ret = 0; 2360 2361 fail: 2362 if (l2_slice) { 2363 if (!is_active_l1) { 2364 qemu_vfree(l2_slice); 2365 } else { 2366 qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice); 2367 } 2368 } 2369 return ret; 2370 } 2371 2372 /* 2373 * For backed images, expands all zero clusters on the image. For non-backed 2374 * images, deallocates all non-pre-allocated zero clusters (and claims the 2375 * allocation for pre-allocated ones). This is important for downgrading to a 2376 * qcow2 version which doesn't yet support metadata zero clusters. 2377 */ 2378 int qcow2_expand_zero_clusters(BlockDriverState *bs, 2379 BlockDriverAmendStatusCB *status_cb, 2380 void *cb_opaque) 2381 { 2382 BDRVQcow2State *s = bs->opaque; 2383 uint64_t *l1_table = NULL; 2384 int64_t l1_entries = 0, visited_l1_entries = 0; 2385 int ret; 2386 int i, j; 2387 2388 if (status_cb) { 2389 l1_entries = s->l1_size; 2390 for (i = 0; i < s->nb_snapshots; i++) { 2391 l1_entries += s->snapshots[i].l1_size; 2392 } 2393 } 2394 2395 ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size, 2396 &visited_l1_entries, l1_entries, 2397 status_cb, cb_opaque); 2398 if (ret < 0) { 2399 goto fail; 2400 } 2401 2402 /* Inactive L1 tables may point to active L2 tables - therefore it is 2403 * necessary to flush the L2 table cache before trying to access the L2 2404 * tables pointed to by inactive L1 entries (else we might try to expand 2405 * zero clusters that have already been expanded); furthermore, it is also 2406 * necessary to empty the L2 table cache, since it may contain tables which 2407 * are now going to be modified directly on disk, bypassing the cache. 2408 * qcow2_cache_empty() does both for us. */ 2409 ret = qcow2_cache_empty(bs, s->l2_table_cache); 2410 if (ret < 0) { 2411 goto fail; 2412 } 2413 2414 for (i = 0; i < s->nb_snapshots; i++) { 2415 int l1_size2; 2416 uint64_t *new_l1_table; 2417 Error *local_err = NULL; 2418 2419 ret = qcow2_validate_table(bs, s->snapshots[i].l1_table_offset, 2420 s->snapshots[i].l1_size, L1E_SIZE, 2421 QCOW_MAX_L1_SIZE, "Snapshot L1 table", 2422 &local_err); 2423 if (ret < 0) { 2424 error_report_err(local_err); 2425 goto fail; 2426 } 2427 2428 l1_size2 = s->snapshots[i].l1_size * L1E_SIZE; 2429 new_l1_table = g_try_realloc(l1_table, l1_size2); 2430 2431 if (!new_l1_table) { 2432 ret = -ENOMEM; 2433 goto fail; 2434 } 2435 2436 l1_table = new_l1_table; 2437 2438 ret = bdrv_pread(bs->file, s->snapshots[i].l1_table_offset, 2439 l1_table, l1_size2); 2440 if (ret < 0) { 2441 goto fail; 2442 } 2443 2444 for (j = 0; j < s->snapshots[i].l1_size; j++) { 2445 be64_to_cpus(&l1_table[j]); 2446 } 2447 2448 ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size, 2449 &visited_l1_entries, l1_entries, 2450 status_cb, cb_opaque); 2451 if (ret < 0) { 2452 goto fail; 2453 } 2454 } 2455 2456 ret = 0; 2457 2458 fail: 2459 g_free(l1_table); 2460 return ret; 2461 } 2462