/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <zlib.h>

#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "qemu/bswap.h"
#include "trace.h"

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size) {
        return 0;
    }

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

    QEMU_BUILD_BUG_ON(QCOW_MAX_L1_SIZE > INT_MAX);
    if (new_l1_size > QCOW_MAX_L1_SIZE / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs,
                                       align_offset(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, align_offset(new_l1_size2, 512));

    if (s->l1_size) {
        memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
    }

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
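    /* The L1 table is stored big-endian on disk but kept in CPU byte order in
     * memory, so the entries are byteswapped in place around the write below
     * and restored afterwards. */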
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    stl_be_p(data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
 fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * Loads a L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success, -errno if the read from the image file failed;
 * on success, *l2_table points to the loaded table.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
                   uint64_t **l2_table)
{
    BDRVQcow2State *s = bs->opaque;

    return qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                           (void **)l2_table);
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want the underlying write to turn into a
 * read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++)
    {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file,
                           s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
208 * 209 */ 210 211 static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table) 212 { 213 BDRVQcow2State *s = bs->opaque; 214 uint64_t old_l2_offset; 215 uint64_t *l2_table = NULL; 216 int64_t l2_offset; 217 int ret; 218 219 old_l2_offset = s->l1_table[l1_index]; 220 221 trace_qcow2_l2_allocate(bs, l1_index); 222 223 /* allocate a new l2 entry */ 224 225 l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t)); 226 if (l2_offset < 0) { 227 ret = l2_offset; 228 goto fail; 229 } 230 231 ret = qcow2_cache_flush(bs, s->refcount_block_cache); 232 if (ret < 0) { 233 goto fail; 234 } 235 236 /* allocate a new entry in the l2 cache */ 237 238 trace_qcow2_l2_allocate_get_empty(bs, l1_index); 239 ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table); 240 if (ret < 0) { 241 goto fail; 242 } 243 244 l2_table = *table; 245 246 if ((old_l2_offset & L1E_OFFSET_MASK) == 0) { 247 /* if there was no old l2 table, clear the new table */ 248 memset(l2_table, 0, s->l2_size * sizeof(uint64_t)); 249 } else { 250 uint64_t* old_table; 251 252 /* if there was an old l2 table, read it from the disk */ 253 BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ); 254 ret = qcow2_cache_get(bs, s->l2_table_cache, 255 old_l2_offset & L1E_OFFSET_MASK, 256 (void**) &old_table); 257 if (ret < 0) { 258 goto fail; 259 } 260 261 memcpy(l2_table, old_table, s->cluster_size); 262 263 qcow2_cache_put(bs, s->l2_table_cache, (void **) &old_table); 264 } 265 266 /* write the l2 table to the file */ 267 BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE); 268 269 trace_qcow2_l2_allocate_write_l2(bs, l1_index); 270 qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table); 271 ret = qcow2_cache_flush(bs, s->l2_table_cache); 272 if (ret < 0) { 273 goto fail; 274 } 275 276 /* update the L1 entry */ 277 trace_qcow2_l2_allocate_write_l1(bs, l1_index); 278 s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED; 279 ret = qcow2_write_l1_entry(bs, l1_index); 280 if (ret < 0) { 281 goto fail; 282 } 283 284 *table = l2_table; 285 trace_qcow2_l2_allocate_done(bs, l1_index, 0); 286 return 0; 287 288 fail: 289 trace_qcow2_l2_allocate_done(bs, l1_index, ret); 290 if (l2_table != NULL) { 291 qcow2_cache_put(bs, s->l2_table_cache, (void**) table); 292 } 293 s->l1_table[l1_index] = old_l2_offset; 294 if (l2_offset > 0) { 295 qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t), 296 QCOW2_DISCARD_ALWAYS); 297 } 298 return ret; 299 } 300 301 /* 302 * Checks how many clusters in a given L2 table are contiguous in the image 303 * file. As soon as one of the flags in the bitmask stop_flags changes compared 304 * to the first cluster, the search is stopped and the cluster is not counted 305 * as contiguous. 
 */
static int count_contiguous_clusters(int nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t stop_flags)
{
    int i;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_table[0]);
    uint64_t offset = first_entry & mask;

    if (!offset) {
        return 0;
    }

    assert(qcow2_get_cluster_type(first_entry) == QCOW2_CLUSTER_NORMAL);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}

static int count_contiguous_clusters_by_type(int nb_clusters,
                                             uint64_t *l2_table,
                                             int wanted_type)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != wanted_type) {
            break;
        }
    }

    return i;
}

/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num,
                          uint8_t *out_buf, const uint8_t *in_buf,
                          int nb_sectors, bool enc,
                          Error **errp)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;
    int ret;

    for (i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        if (qcrypto_cipher_setiv(s->cipher,
                                 ivec.b, G_N_ELEMENTS(ivec.b),
                                 errp) < 0) {
            return -1;
        }
        if (enc) {
            ret = qcrypto_cipher_encrypt(s->cipher,
                                         in_buf,
                                         out_buf,
                                         512,
                                         errp);
        } else {
            ret = qcrypto_cipher_decrypt(s->cipher,
                                         in_buf,
                                         out_buf,
                                         512,
                                         errp);
        }
        if (ret < 0) {
            return -1;
        }
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
    return 0;
}
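/*
 * Reads the guest-visible data of a COW region through the qcow2 driver
 * itself (so backing files and decryption are taken care of), re-encrypts
 * it if the image is encrypted, and writes it to the newly allocated
 * cluster in the image file.
 */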
423 */ 424 ret = bs->drv->bdrv_co_preadv(bs, src_cluster_offset + offset_in_cluster, 425 bytes, &qiov, 0); 426 if (ret < 0) { 427 goto out; 428 } 429 430 if (bs->encrypted) { 431 Error *err = NULL; 432 int64_t sector = (cluster_offset + offset_in_cluster) 433 >> BDRV_SECTOR_BITS; 434 assert(s->cipher); 435 assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0); 436 assert((bytes & ~BDRV_SECTOR_MASK) == 0); 437 if (qcow2_encrypt_sectors(s, sector, iov.iov_base, iov.iov_base, 438 bytes >> BDRV_SECTOR_BITS, true, &err) < 0) { 439 ret = -EIO; 440 error_free(err); 441 goto out; 442 } 443 } 444 445 ret = qcow2_pre_write_overlap_check(bs, 0, 446 cluster_offset + offset_in_cluster, bytes); 447 if (ret < 0) { 448 goto out; 449 } 450 451 BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE); 452 ret = bdrv_co_pwritev(bs->file, cluster_offset + offset_in_cluster, 453 bytes, &qiov, 0); 454 if (ret < 0) { 455 goto out; 456 } 457 458 ret = 0; 459 out: 460 qemu_vfree(iov.iov_base); 461 return ret; 462 } 463 464 465 /* 466 * get_cluster_offset 467 * 468 * For a given offset of the virtual disk, find the cluster type and offset in 469 * the qcow2 file. The offset is stored in *cluster_offset. 470 * 471 * On entry, *bytes is the maximum number of contiguous bytes starting at 472 * offset that we are interested in. 473 * 474 * On exit, *bytes is the number of bytes starting at offset that have the same 475 * cluster type and (if applicable) are stored contiguously in the image file. 476 * Compressed clusters are always returned one by one. 477 * 478 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error 479 * cases. 480 */ 481 int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset, 482 unsigned int *bytes, uint64_t *cluster_offset) 483 { 484 BDRVQcow2State *s = bs->opaque; 485 unsigned int l2_index; 486 uint64_t l1_index, l2_offset, *l2_table; 487 int l1_bits, c; 488 unsigned int offset_in_cluster; 489 uint64_t bytes_available, bytes_needed, nb_clusters; 490 int ret; 491 492 offset_in_cluster = offset_into_cluster(s, offset); 493 bytes_needed = (uint64_t) *bytes + offset_in_cluster; 494 495 l1_bits = s->l2_bits + s->cluster_bits; 496 497 /* compute how many bytes there are between the start of the cluster 498 * containing offset and the end of the l1 entry */ 499 bytes_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1)) 500 + offset_in_cluster; 501 502 if (bytes_needed > bytes_available) { 503 bytes_needed = bytes_available; 504 } 505 506 *cluster_offset = 0; 507 508 /* seek to the l2 offset in the l1 table */ 509 510 l1_index = offset >> l1_bits; 511 if (l1_index >= s->l1_size) { 512 ret = QCOW2_CLUSTER_UNALLOCATED; 513 goto out; 514 } 515 516 l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK; 517 if (!l2_offset) { 518 ret = QCOW2_CLUSTER_UNALLOCATED; 519 goto out; 520 } 521 522 if (offset_into_cluster(s, l2_offset)) { 523 qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64 524 " unaligned (L1 index: %#" PRIx64 ")", 525 l2_offset, l1_index); 526 return -EIO; 527 } 528 529 /* load the l2 table in memory */ 530 531 ret = l2_load(bs, l2_offset, &l2_table); 532 if (ret < 0) { 533 return ret; 534 } 535 536 /* find the cluster offset for the given disk offset */ 537 538 l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1); 539 *cluster_offset = be64_to_cpu(l2_table[l2_index]); 540 541 nb_clusters = size_to_clusters(s, bytes_needed); 542 /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned 543 * integers; the minimum cluster size is 512, 
    /* bytes_needed <= *bytes + offset_in_cluster, both of which are unsigned
     * integers; the minimum cluster size is 512, so this assertion is always
     * true */
    assert(nb_clusters <= INT_MAX);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        if (s->qcow_version < 3) {
            qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                    " in pre-v3 image (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        c = count_contiguous_clusters_by_type(nb_clusters, &l2_table[l2_index],
                                              QCOW2_CLUSTER_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_clusters_by_type(nb_clusters, &l2_table[l2_index],
                                              QCOW2_CLUSTER_UNALLOCATED);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    bytes_available = (int64_t)c * s->cluster_size;

out:
    if (bytes_available > bytes_needed) {
        bytes_available = bytes_needed;
    }

    /* bytes_available <= bytes_needed <= *bytes + offset_in_cluster;
     * subtracting offset_in_cluster will therefore definitely yield something
     * not exceeding UINT_MAX */
    assert(bytes_available - offset_in_cluster <= UINT_MAX);
    *bytes = bytes_available - offset_in_cluster;

    return ret;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
    return ret;
}

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * If the L2 table was not yet allocated, or is still shared with a snapshot
 * (i.e. not marked as COPIED), a new L2 table is allocated first and the L1
 * entry is updated to point to it.
 *
 * Returns 0 on success, -errno in failure cases
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful,
 * return 0 otherwise.
 *
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        return 0;
    }
    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return cluster_offset;
}

static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    if (r->nb_bytes == 0) {
        return 0;
    }

    qemu_co_mutex_unlock(&s->lock);
    ret = do_perform_cow(bs, m->offset, m->alloc_offset,
                         r->offset, r->nb_bytes);
    qemu_co_mutex_lock(&s->lock);

    if (ret < 0) {
        return ret;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    qcow2_cache_depends_on_flush(s->l2_table_cache);

    return 0;
}
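/*
 * Performs the COW of the head and tail regions of the request described by
 * m and then links the newly allocated clusters into the L2 table, collecting
 * the entries they replace so that the old clusters' refcounts can be
 * decreased afterwards.
 */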
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m, &m->cow_start);
    if (ret < 0) {
        goto err;
    }

    ret = perform_cow(bs, m, &m->cow_end);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);

    assert(l2_index + m->nb_clusters <= s->l2_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes hit the same unallocated cluster, each of
         * them allocates a separate cluster and writes its data concurrently.
         * The first one to complete updates the L2 table with a pointer to its
         * cluster; the second one has to do a RMW (which is done above by
         * perform_cow()), update the L2 entry with its own cluster pointer,
         * and free the old cluster. That is what this loop does. */
        if (l2_table[l2_index + i] != 0) {
            old_cluster[j++] = l2_table[l2_index + i];
        }

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must copy from the backing file)
 */
static int count_cow_clusters(BDRVQcow2State *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

        switch (cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}

/*
 * Checks how many clusters that are already allocated and don't require a
 * copy on write exist at the given guest_offset (up to *bytes). If
 * *host_offset is not zero, only physically contiguous clusters beginning at
 * this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *  -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    uint64_t nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
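     *
     * (Hypothetical numbers: with 64 KiB clusters there are 8192 entries per
     * L2 table, so a request at guest offset 0x7FF00000 maps to l2_index 8176
     * and can cover at most 16 clusters here, however large *bytes is.)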
992 */ 993 nb_clusters = 994 size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes); 995 996 l2_index = offset_to_l2_index(s, guest_offset); 997 nb_clusters = MIN(nb_clusters, s->l2_size - l2_index); 998 assert(nb_clusters <= INT_MAX); 999 1000 /* Find L2 entry for the first involved cluster */ 1001 ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index); 1002 if (ret < 0) { 1003 return ret; 1004 } 1005 1006 cluster_offset = be64_to_cpu(l2_table[l2_index]); 1007 1008 /* Check how many clusters are already allocated and don't need COW */ 1009 if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL 1010 && (cluster_offset & QCOW_OFLAG_COPIED)) 1011 { 1012 /* If a specific host_offset is required, check it */ 1013 bool offset_matches = 1014 (cluster_offset & L2E_OFFSET_MASK) == *host_offset; 1015 1016 if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) { 1017 qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset " 1018 "%#llx unaligned (guest offset: %#" PRIx64 1019 ")", cluster_offset & L2E_OFFSET_MASK, 1020 guest_offset); 1021 ret = -EIO; 1022 goto out; 1023 } 1024 1025 if (*host_offset != 0 && !offset_matches) { 1026 *bytes = 0; 1027 ret = 0; 1028 goto out; 1029 } 1030 1031 /* We keep all QCOW_OFLAG_COPIED clusters */ 1032 keep_clusters = 1033 count_contiguous_clusters(nb_clusters, s->cluster_size, 1034 &l2_table[l2_index], 1035 QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO); 1036 assert(keep_clusters <= nb_clusters); 1037 1038 *bytes = MIN(*bytes, 1039 keep_clusters * s->cluster_size 1040 - offset_into_cluster(s, guest_offset)); 1041 1042 ret = 1; 1043 } else { 1044 ret = 0; 1045 } 1046 1047 /* Cleanup */ 1048 out: 1049 qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table); 1050 1051 /* Only return a host offset if we actually made progress. Otherwise we 1052 * would make requirements for handle_alloc() that it can't fulfill */ 1053 if (ret > 0) { 1054 *host_offset = (cluster_offset & L2E_OFFSET_MASK) 1055 + offset_into_cluster(s, guest_offset); 1056 } 1057 1058 return ret; 1059 } 1060 1061 /* 1062 * Allocates new clusters for the given guest_offset. 1063 * 1064 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to 1065 * contain the number of clusters that have been allocated and are contiguous 1066 * in the image file. 1067 * 1068 * If *host_offset is non-zero, it specifies the offset in the image file at 1069 * which the new clusters must start. *nb_clusters can be 0 on return in this 1070 * case if the cluster at host_offset is already in use. If *host_offset is 1071 * zero, the clusters can be allocated anywhere in the image file. 1072 * 1073 * *host_offset is updated to contain the offset into the image file at which 1074 * the first allocated cluster starts. 1075 * 1076 * Return 0 on success and -errno in error cases. -EAGAIN means that the 1077 * function has been waiting for another request and the allocation must be 1078 * restarted, but the whole request should not be failed. 
1079 */ 1080 static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset, 1081 uint64_t *host_offset, uint64_t *nb_clusters) 1082 { 1083 BDRVQcow2State *s = bs->opaque; 1084 1085 trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset, 1086 *host_offset, *nb_clusters); 1087 1088 /* Allocate new clusters */ 1089 trace_qcow2_cluster_alloc_phys(qemu_coroutine_self()); 1090 if (*host_offset == 0) { 1091 int64_t cluster_offset = 1092 qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size); 1093 if (cluster_offset < 0) { 1094 return cluster_offset; 1095 } 1096 *host_offset = cluster_offset; 1097 return 0; 1098 } else { 1099 int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters); 1100 if (ret < 0) { 1101 return ret; 1102 } 1103 *nb_clusters = ret; 1104 return 0; 1105 } 1106 } 1107 1108 /* 1109 * Allocates new clusters for an area that either is yet unallocated or needs a 1110 * copy on write. If *host_offset is non-zero, clusters are only allocated if 1111 * the new allocation can match the specified host offset. 1112 * 1113 * Note that guest_offset may not be cluster aligned. In this case, the 1114 * returned *host_offset points to exact byte referenced by guest_offset and 1115 * therefore isn't cluster aligned as well. 1116 * 1117 * Returns: 1118 * 0: if no clusters could be allocated. *bytes is set to 0, 1119 * *host_offset is left unchanged. 1120 * 1121 * 1: if new clusters were allocated. *bytes may be decreased if the 1122 * new allocation doesn't cover all of the requested area. 1123 * *host_offset is updated to contain the host offset of the first 1124 * newly allocated cluster. 1125 * 1126 * -errno: in error cases 1127 */ 1128 static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset, 1129 uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m) 1130 { 1131 BDRVQcow2State *s = bs->opaque; 1132 int l2_index; 1133 uint64_t *l2_table; 1134 uint64_t entry; 1135 uint64_t nb_clusters; 1136 int ret; 1137 1138 uint64_t alloc_cluster_offset; 1139 1140 trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset, 1141 *bytes); 1142 assert(*bytes > 0); 1143 1144 /* 1145 * Calculate the number of clusters to look for. We stop at L2 table 1146 * boundaries to keep things simple. 1147 */ 1148 nb_clusters = 1149 size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes); 1150 1151 l2_index = offset_to_l2_index(s, guest_offset); 1152 nb_clusters = MIN(nb_clusters, s->l2_size - l2_index); 1153 assert(nb_clusters <= INT_MAX); 1154 1155 /* Find L2 entry for the first involved cluster */ 1156 ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index); 1157 if (ret < 0) { 1158 return ret; 1159 } 1160 1161 entry = be64_to_cpu(l2_table[l2_index]); 1162 1163 /* For the moment, overwrite compressed clusters one by one */ 1164 if (entry & QCOW_OFLAG_COMPRESSED) { 1165 nb_clusters = 1; 1166 } else { 1167 nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index); 1168 } 1169 1170 /* This function is only called when there were no non-COW clusters, so if 1171 * we can't find any unallocated or COW clusters either, something is 1172 * wrong with our code. 
     */
    assert(nb_clusters > 0);

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /* Allocate, if necessary at a given offset in the image file */
    alloc_cluster_offset = start_of_cluster(s, *host_offset);
    ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                  &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Can't extend contiguous allocation */
    if (nb_clusters == 0) {
        *bytes = 0;
        return 0;
    }

    /* !*host_offset would overwrite the image header and is reserved for "no
     * host offset preferred". If 0 was a valid host offset, it'd trigger the
     * following overlap check; do that now to avoid having an invalid value in
     * *host_offset. */
    if (!alloc_cluster_offset) {
        ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
                                            nb_clusters * s->cluster_size);
        assert(ret < 0);
        goto fail;
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_bytes: Number of bytes from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_bytes: Number of bytes from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_bytes: The number of bytes from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
    int avail_bytes = MIN(INT_MAX, nb_clusters << s->cluster_bits);
    int nb_bytes = MIN(requested_bytes, avail_bytes);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,

        .cow_start = {
            .offset     = 0,
            .nb_bytes   = offset_into_cluster(s, guest_offset),
        },
        .cow_end = {
            .offset     = nb_bytes,
            .nb_bytes   = avail_bytes - nb_bytes,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, nb_bytes - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in the
 * qcow2 file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * On return, *bytes is the number of contiguous bytes starting at offset
 * that can be written to; it may be smaller than the value passed in.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
                               unsigned int *bytes, uint64_t *host_offset,
                               QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *bytes);

again:
    start = offset;
    remaining = *bytes;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start += cur_bytes;
        remaining -= cur_bytes;
        cluster_offset += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight
         *         allocation, set cluster_offset to write to the same cluster
         *         and set up the right synchronisation between the in-flight
         *         request and the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
1362 */ 1363 ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m); 1364 if (ret < 0) { 1365 return ret; 1366 } else if (ret) { 1367 continue; 1368 } else { 1369 assert(cur_bytes == 0); 1370 break; 1371 } 1372 } 1373 1374 *bytes -= remaining; 1375 assert(*bytes > 0); 1376 assert(*host_offset != 0); 1377 1378 return 0; 1379 } 1380 1381 static int decompress_buffer(uint8_t *out_buf, int out_buf_size, 1382 const uint8_t *buf, int buf_size) 1383 { 1384 z_stream strm1, *strm = &strm1; 1385 int ret, out_len; 1386 1387 memset(strm, 0, sizeof(*strm)); 1388 1389 strm->next_in = (uint8_t *)buf; 1390 strm->avail_in = buf_size; 1391 strm->next_out = out_buf; 1392 strm->avail_out = out_buf_size; 1393 1394 ret = inflateInit2(strm, -12); 1395 if (ret != Z_OK) 1396 return -1; 1397 ret = inflate(strm, Z_FINISH); 1398 out_len = strm->next_out - out_buf; 1399 if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) || 1400 out_len != out_buf_size) { 1401 inflateEnd(strm); 1402 return -1; 1403 } 1404 inflateEnd(strm); 1405 return 0; 1406 } 1407 1408 int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset) 1409 { 1410 BDRVQcow2State *s = bs->opaque; 1411 int ret, csize, nb_csectors, sector_offset; 1412 uint64_t coffset; 1413 1414 coffset = cluster_offset & s->cluster_offset_mask; 1415 if (s->cluster_cache_offset != coffset) { 1416 nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1; 1417 sector_offset = coffset & 511; 1418 csize = nb_csectors * 512 - sector_offset; 1419 BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED); 1420 ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, 1421 nb_csectors); 1422 if (ret < 0) { 1423 return ret; 1424 } 1425 if (decompress_buffer(s->cluster_cache, s->cluster_size, 1426 s->cluster_data + sector_offset, csize) < 0) { 1427 return -EIO; 1428 } 1429 s->cluster_cache_offset = coffset; 1430 } 1431 return 0; 1432 } 1433 1434 /* 1435 * This discards as many clusters of nb_clusters as possible at once (i.e. 1436 * all clusters in the same L2 table) and returns the number of discarded 1437 * clusters. 1438 */ 1439 static int discard_single_l2(BlockDriverState *bs, uint64_t offset, 1440 uint64_t nb_clusters, enum qcow2_discard_type type, 1441 bool full_discard) 1442 { 1443 BDRVQcow2State *s = bs->opaque; 1444 uint64_t *l2_table; 1445 int l2_index; 1446 int ret; 1447 int i; 1448 1449 ret = get_cluster_table(bs, offset, &l2_table, &l2_index); 1450 if (ret < 0) { 1451 return ret; 1452 } 1453 1454 /* Limit nb_clusters to one L2 table */ 1455 nb_clusters = MIN(nb_clusters, s->l2_size - l2_index); 1456 assert(nb_clusters <= INT_MAX); 1457 1458 for (i = 0; i < nb_clusters; i++) { 1459 uint64_t old_l2_entry; 1460 1461 old_l2_entry = be64_to_cpu(l2_table[l2_index + i]); 1462 1463 /* 1464 * If full_discard is false, make sure that a discarded area reads back 1465 * as zeroes for v3 images (we cannot do it for v2 without actually 1466 * writing a zero-filled buffer). We can skip the operation if the 1467 * cluster is already marked as zero, or if it's unallocated and we 1468 * don't have a backing file. 1469 * 1470 * TODO We might want to use bdrv_get_block_status(bs) here, but we're 1471 * holding s->lock, so that doesn't work today. 1472 * 1473 * If full_discard is true, the sector should not read back as zeroes, 1474 * but rather fall through to the backing file. 
1475 */ 1476 switch (qcow2_get_cluster_type(old_l2_entry)) { 1477 case QCOW2_CLUSTER_UNALLOCATED: 1478 if (full_discard || !bs->backing) { 1479 continue; 1480 } 1481 break; 1482 1483 case QCOW2_CLUSTER_ZERO: 1484 if (!full_discard) { 1485 continue; 1486 } 1487 break; 1488 1489 case QCOW2_CLUSTER_NORMAL: 1490 case QCOW2_CLUSTER_COMPRESSED: 1491 break; 1492 1493 default: 1494 abort(); 1495 } 1496 1497 /* First remove L2 entries */ 1498 qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table); 1499 if (!full_discard && s->qcow_version >= 3) { 1500 l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO); 1501 } else { 1502 l2_table[l2_index + i] = cpu_to_be64(0); 1503 } 1504 1505 /* Then decrease the refcount */ 1506 qcow2_free_any_clusters(bs, old_l2_entry, 1, type); 1507 } 1508 1509 qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table); 1510 1511 return nb_clusters; 1512 } 1513 1514 int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset, 1515 int nb_sectors, enum qcow2_discard_type type, bool full_discard) 1516 { 1517 BDRVQcow2State *s = bs->opaque; 1518 uint64_t end_offset; 1519 uint64_t nb_clusters; 1520 int ret; 1521 1522 end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS); 1523 1524 /* Round start up and end down */ 1525 offset = align_offset(offset, s->cluster_size); 1526 end_offset = start_of_cluster(s, end_offset); 1527 1528 if (offset > end_offset) { 1529 return 0; 1530 } 1531 1532 nb_clusters = size_to_clusters(s, end_offset - offset); 1533 1534 s->cache_discards = true; 1535 1536 /* Each L2 table is handled by its own loop iteration */ 1537 while (nb_clusters > 0) { 1538 ret = discard_single_l2(bs, offset, nb_clusters, type, full_discard); 1539 if (ret < 0) { 1540 goto fail; 1541 } 1542 1543 nb_clusters -= ret; 1544 offset += (ret * s->cluster_size); 1545 } 1546 1547 ret = 0; 1548 fail: 1549 s->cache_discards = false; 1550 qcow2_process_discards(bs, ret); 1551 1552 return ret; 1553 } 1554 1555 /* 1556 * This zeroes as many clusters of nb_clusters as possible at once (i.e. 1557 * all clusters in the same L2 table) and returns the number of zeroed 1558 * clusters. 
1559 */ 1560 static int zero_single_l2(BlockDriverState *bs, uint64_t offset, 1561 uint64_t nb_clusters) 1562 { 1563 BDRVQcow2State *s = bs->opaque; 1564 uint64_t *l2_table; 1565 int l2_index; 1566 int ret; 1567 int i; 1568 1569 ret = get_cluster_table(bs, offset, &l2_table, &l2_index); 1570 if (ret < 0) { 1571 return ret; 1572 } 1573 1574 /* Limit nb_clusters to one L2 table */ 1575 nb_clusters = MIN(nb_clusters, s->l2_size - l2_index); 1576 assert(nb_clusters <= INT_MAX); 1577 1578 for (i = 0; i < nb_clusters; i++) { 1579 uint64_t old_offset; 1580 1581 old_offset = be64_to_cpu(l2_table[l2_index + i]); 1582 1583 /* Update L2 entries */ 1584 qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table); 1585 if (old_offset & QCOW_OFLAG_COMPRESSED) { 1586 l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO); 1587 qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST); 1588 } else { 1589 l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO); 1590 } 1591 } 1592 1593 qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table); 1594 1595 return nb_clusters; 1596 } 1597 1598 int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors) 1599 { 1600 BDRVQcow2State *s = bs->opaque; 1601 uint64_t nb_clusters; 1602 int ret; 1603 1604 /* The zero flag is only supported by version 3 and newer */ 1605 if (s->qcow_version < 3) { 1606 return -ENOTSUP; 1607 } 1608 1609 /* Each L2 table is handled by its own loop iteration */ 1610 nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS); 1611 1612 s->cache_discards = true; 1613 1614 while (nb_clusters > 0) { 1615 ret = zero_single_l2(bs, offset, nb_clusters); 1616 if (ret < 0) { 1617 goto fail; 1618 } 1619 1620 nb_clusters -= ret; 1621 offset += (ret * s->cluster_size); 1622 } 1623 1624 ret = 0; 1625 fail: 1626 s->cache_discards = false; 1627 qcow2_process_discards(bs, ret); 1628 1629 return ret; 1630 } 1631 1632 /* 1633 * Expands all zero clusters in a specific L1 table (or deallocates them, for 1634 * non-backed non-pre-allocated zero clusters). 1635 * 1636 * l1_entries and *visited_l1_entries are used to keep track of progress for 1637 * status_cb(). l1_entries contains the total number of L1 entries and 1638 * *visited_l1_entries counts all visited L1 entries. 
1639 */ 1640 static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table, 1641 int l1_size, int64_t *visited_l1_entries, 1642 int64_t l1_entries, 1643 BlockDriverAmendStatusCB *status_cb, 1644 void *cb_opaque) 1645 { 1646 BDRVQcow2State *s = bs->opaque; 1647 bool is_active_l1 = (l1_table == s->l1_table); 1648 uint64_t *l2_table = NULL; 1649 int ret; 1650 int i, j; 1651 1652 if (!is_active_l1) { 1653 /* inactive L2 tables require a buffer to be stored in when loading 1654 * them from disk */ 1655 l2_table = qemu_try_blockalign(bs->file->bs, s->cluster_size); 1656 if (l2_table == NULL) { 1657 return -ENOMEM; 1658 } 1659 } 1660 1661 for (i = 0; i < l1_size; i++) { 1662 uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK; 1663 bool l2_dirty = false; 1664 uint64_t l2_refcount; 1665 1666 if (!l2_offset) { 1667 /* unallocated */ 1668 (*visited_l1_entries)++; 1669 if (status_cb) { 1670 status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque); 1671 } 1672 continue; 1673 } 1674 1675 if (offset_into_cluster(s, l2_offset)) { 1676 qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" 1677 PRIx64 " unaligned (L1 index: %#x)", 1678 l2_offset, i); 1679 ret = -EIO; 1680 goto fail; 1681 } 1682 1683 if (is_active_l1) { 1684 /* get active L2 tables from cache */ 1685 ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, 1686 (void **)&l2_table); 1687 } else { 1688 /* load inactive L2 tables from disk */ 1689 ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE, 1690 (void *)l2_table, s->cluster_sectors); 1691 } 1692 if (ret < 0) { 1693 goto fail; 1694 } 1695 1696 ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits, 1697 &l2_refcount); 1698 if (ret < 0) { 1699 goto fail; 1700 } 1701 1702 for (j = 0; j < s->l2_size; j++) { 1703 uint64_t l2_entry = be64_to_cpu(l2_table[j]); 1704 int64_t offset = l2_entry & L2E_OFFSET_MASK; 1705 int cluster_type = qcow2_get_cluster_type(l2_entry); 1706 bool preallocated = offset != 0; 1707 1708 if (cluster_type != QCOW2_CLUSTER_ZERO) { 1709 continue; 1710 } 1711 1712 if (!preallocated) { 1713 if (!bs->backing) { 1714 /* not backed; therefore we can simply deallocate the 1715 * cluster */ 1716 l2_table[j] = 0; 1717 l2_dirty = true; 1718 continue; 1719 } 1720 1721 offset = qcow2_alloc_clusters(bs, s->cluster_size); 1722 if (offset < 0) { 1723 ret = offset; 1724 goto fail; 1725 } 1726 1727 if (l2_refcount > 1) { 1728 /* For shared L2 tables, set the refcount accordingly (it is 1729 * already 1 and needs to be l2_refcount) */ 1730 ret = qcow2_update_cluster_refcount(bs, 1731 offset >> s->cluster_bits, 1732 refcount_diff(1, l2_refcount), false, 1733 QCOW2_DISCARD_OTHER); 1734 if (ret < 0) { 1735 qcow2_free_clusters(bs, offset, s->cluster_size, 1736 QCOW2_DISCARD_OTHER); 1737 goto fail; 1738 } 1739 } 1740 } 1741 1742 if (offset_into_cluster(s, offset)) { 1743 qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset " 1744 "%#" PRIx64 " unaligned (L2 offset: %#" 1745 PRIx64 ", L2 index: %#x)", offset, 1746 l2_offset, j); 1747 if (!preallocated) { 1748 qcow2_free_clusters(bs, offset, s->cluster_size, 1749 QCOW2_DISCARD_ALWAYS); 1750 } 1751 ret = -EIO; 1752 goto fail; 1753 } 1754 1755 ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size); 1756 if (ret < 0) { 1757 if (!preallocated) { 1758 qcow2_free_clusters(bs, offset, s->cluster_size, 1759 QCOW2_DISCARD_ALWAYS); 1760 } 1761 goto fail; 1762 } 1763 1764 ret = bdrv_pwrite_zeroes(bs->file, offset, s->cluster_size, 0); 1765 if (ret < 0) { 1766 if (!preallocated) { 1767 
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            if (l2_refcount == 1) {
                l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
            } else {
                l2_table[j] = cpu_to_be64(offset);
            }
            l2_dirty = true;
        }

        if (is_active_l1) {
            if (l2_dirty) {
                qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
                qcow2_cache_depends_on_flush(s->l2_table_cache);
            }
            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        } else {
            if (l2_dirty) {
                ret = qcow2_pre_write_overlap_check(bs,
                        QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
                        s->cluster_size);
                if (ret < 0) {
                    goto fail;
                }

                ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                                 (void *)l2_table, s->cluster_sectors);
                if (ret < 0) {
                    goto fail;
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
        }
    }

    ret = 0;

fail:
    if (l2_table) {
        if (!is_active_l1) {
            qemu_vfree(l2_table);
        } else {
            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        }
    }
    return ret;
}

/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs,
                               BlockDriverAmendStatusCB *status_cb,
                               void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table = NULL;
    int64_t l1_entries = 0, visited_l1_entries = 0;
    int ret;
    int i, j;

    if (status_cb) {
        l1_entries = s->l1_size;
        for (i = 0; i < s->nb_snapshots; i++) {
            l1_entries += s->snapshots[i].l1_size;
        }
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &visited_l1_entries, l1_entries,
                                     status_cb, cb_opaque);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us.
     */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_sectors = DIV_ROUND_UP(s->snapshots[i].l1_size *
                                      sizeof(uint64_t), BDRV_SECTOR_SIZE);

        l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);

        ret = bdrv_read(bs->file,
                        s->snapshots[i].l1_table_offset / BDRV_SECTOR_SIZE,
                        (void *)l1_table, l1_sectors);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &visited_l1_entries, l1_entries,
                                         status_cb, cb_opaque);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(l1_table);
    return ret;
}