/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "trace.h"

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size) {
        return 0;
    }

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }
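
    /*
     * Illustrative example (not from the original code): with this 1.5x
     * growth factor, min_size = 100 walks the sequence
     * 1 -> 2 -> 3 -> 5 -> 8 -> 12 -> 18 -> 27 -> 41 -> 62 -> 93 -> 140,
     * so a table that grows one entry at a time still causes only
     * logarithmically many reallocations.
     */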

    if (new_l1_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file,
                                       align_offset(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, align_offset(new_l1_size2, 512));

    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t *)data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success; on failure, -errno is returned and *l2_table is
 * not valid.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
                   uint64_t **l2_table)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void **)l2_table);

    return ret;
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
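
    /*
     * Example (illustrative): L1_ENTRIES_PER_SECTOR is 512 / 8 = 64, so for
     * l1_index = 70 the sector covering entries 64..127 is rewritten
     * (l1_start_index = 70 & ~63 = 64). Entries past s->l1_size remain zero
     * in buf, so nothing stale is written beyond the end of the table.
     */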
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++)
    {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table), copy the contents of the old L2 table into the newly allocated
 * one. Otherwise the new table is initialized with zeros.
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table = NULL;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset,
                                (void **)table);
    if (ret < 0) {
        goto fail;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t *old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
                              old_l2_offset & L1E_OFFSET_MASK,
                              (void **)&old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        qcow2_cache_put(bs, s->l2_table_cache, (void **)&old_table);
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_table != NULL) {
        qcow2_cache_put(bs, s->l2_table_cache, (void **)table);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}
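
/*
 * Note (informal summary, not from the original code): an L1 entry with
 * QCOW_OFLAG_COPIED set points to an L2 table with refcount 1, which may be
 * updated in place. get_cluster_table() below therefore only calls
 * l2_allocate() when the flag is clear, i.e. when the table is shared with a
 * snapshot and has to be copied before it can be modified.
 */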

/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster, which may require different handling.)
 */
static int count_contiguous_clusters(int nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t stop_flags)
{
    int i;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_table[0]);
    uint64_t offset = first_entry & mask;

    if (!offset) {
        return 0;
    }

    assert(qcow2_get_cluster_type(first_entry) != QCOW2_CLUSTER_COMPRESSED);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}
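
/*
 * Illustrative example (made-up values): with 64 KiB clusters and plain L2
 * entries 0x50000, 0x60000, 0x80000, the loop stops at i = 2 because
 * 0x50000 + 2 * 0x10000 != 0x80000; only the first two clusters are
 * contiguous in the image file.
 */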

static int count_contiguous_free_clusters(int nb_clusters, uint64_t *l2_table)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != QCOW2_CLUSTER_UNALLOCATED) {
            break;
        }
    }

    return i;
}

/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num,
                          uint8_t *out_buf, const uint8_t *in_buf,
                          int nb_sectors, bool enc,
                          Error **errp)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;
    int ret;

    for (i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        if (qcrypto_cipher_setiv(s->cipher,
                                 ivec.b, G_N_ELEMENTS(ivec.b),
                                 errp) < 0) {
            return -1;
        }
        if (enc) {
            ret = qcrypto_cipher_encrypt(s->cipher,
                                         in_buf,
                                         out_buf,
                                         512,
                                         errp);
        } else {
            ret = qcrypto_cipher_decrypt(s->cipher,
                                         in_buf,
                                         out_buf,
                                         512,
                                         errp);
        }
        if (ret < 0) {
            return -1;
        }
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
    return 0;
}
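
/*
 * Example IV (derived from the code above, shown for illustration): for
 * sector 0x1234 the 16-byte IV is the 64-bit little-endian sector number
 * padded with zeroes:
 *
 *     34 12 00 00 00 00 00 00  00 00 00 00 00 00 00 00
 *
 * Each 512-byte sector is then encrypted independently with its own IV.
 */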

static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcow2State *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_try_blockalign(bs, iov.iov_len);
    if (iov.iov_base == NULL) {
        return -ENOMEM;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        ret = -ENOMEDIUM;
        goto out;
    }

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (bs->encrypted) {
        Error *err = NULL;
        assert(s->cipher);
        if (qcow2_encrypt_sectors(s, start_sect + n_start,
                                  iov.iov_base, iov.iov_base, n,
                                  true, &err) < 0) {
            ret = -EIO;
            error_free(err);
            goto out;
        }
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + n_start * BDRV_SECTOR_SIZE, n * BDRV_SECTOR_SIZE);
    if (ret < 0) {
        goto out;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}
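
/*
 * Usage sketch (informal, not from the original code): copy_sectors() is the
 * workhorse for the two COW regions of a partial cluster write. For a write
 * that touches only the middle of a newly allocated cluster, perform_cow()
 * below invokes it twice:
 *
 *     |<--------------- cluster --------------->|
 *     | cow_start |  guest write data |  cow_end |
 *
 * each time reading the old contents through the driver's read path and
 * writing them to their place in the newly allocated cluster.
 */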

/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in the
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * on entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             int *num, uint64_t *cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }
    assert(nb_needed <= INT_MAX);

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* nb_needed <= INT_MAX, thus nb_clusters <= INT_MAX, too */
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        if (s->qcow_version < 3) {
            qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                    " in pre-v3 image (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed) {
        nb_available = nb_needed;
    }

    *num = nb_available - index_in_cluster;

    return ret;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
    return ret;
}
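
/*
 * Worked example (illustrative numbers, not from the original code): with
 * 64 KiB clusters (cluster_bits = 16) an L2 table holds 2^13 entries
 * (l2_bits = 13), so l1_bits = 29. A guest offset of 0x2345678 then
 * decomposes into l1_index = 0x2345678 >> 29 = 0,
 * l2_index = (0x2345678 >> 16) & 0x1fff = 0x234, and
 * index_in_cluster = (0x2345678 >> 9) & 0x7f = 0x2b.
 */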

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Returns the cluster offset if successful; returns 0 otherwise.
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);

    return cluster_offset;
}
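
/*
 * Example encoding (illustrative numbers): with 64 KiB clusters, csize_shift
 * works out to 62 - (16 - 8) = 54. Compressed data of 1200 bytes starting at
 * host offset 0x110200 spans sectors 0x881..0x883, so nb_csectors = 2 and
 * the entry becomes QCOW_OFLAG_COMPRESSED | (2ULL << 54) | 0x110200. The
 * stored count is the number of sectors beyond the first, which is why
 * qcow2_decompress_cluster() adds 1 when reading it back.
 */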

static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    if (r->nb_sectors == 0) {
        return 0;
    }

    qemu_co_mutex_unlock(&s->lock);
    ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
                       r->offset / BDRV_SECTOR_SIZE,
                       r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
    qemu_co_mutex_lock(&s->lock);

    if (ret < 0) {
        return ret;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    qcow2_cache_depends_on_flush(s->l2_table_cache);

    return 0;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m, &m->cow_start);
    if (ret < 0) {
        goto err;
    }

    ret = perform_cow(bs, m, &m->cow_end);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);

    assert(l2_index + m->nb_clusters <= s->l2_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes its data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by copy_sectors()), update the L2 table with its cluster
         * pointer, and free the old cluster. This is what this loop does. */
        if (l2_table[l2_index + i] != 0) {
            old_cluster[j++] = l2_table[l2_index + i];
        }

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    ((uint64_t)i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must copy from the backing file)
 */
static int count_cow_clusters(BDRVQcow2State *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

        switch (cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}
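
/*
 * Example (hypothetical request sizes): if an in-flight allocation covers
 * guest bytes [64K, 128K) and a new request asks for [32K, 96K), the new
 * request is shortened to bytes = 64K - 32K = 32K and proceeds. A request
 * for [96K, 112K) instead overlaps starting with its very first byte, so
 * bytes becomes 0 and the coroutine waits on dependent_requests (assuming
 * no L2Meta has been gathered yet).
 */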

/*
 * Checks how many clusters that are already allocated and don't require a
 * copy on write there are at the given guest_offset (up to *bytes). If
 * *host_offset is not zero, only physically contiguous clusters beginning at
 * this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *   -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    uint64_t nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset,
                              *host_offset, *bytes);

    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#" PRIx64 " unaligned (guest offset: %#"
                                    PRIx64 ")",
                                    cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
                                   uint64_t *host_offset, uint64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}
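
/*
 * In other words (informal summary): with *host_offset == 0 the allocator is
 * free to place the run anywhere and always returns it in full, while a
 * non-zero *host_offset turns the call into "extend the image contiguously
 * at this position", which may be satisfiable only for a prefix of
 * *nb_clusters, or not at all.
 */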

/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *   -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    uint64_t nb_clusters;
    int ret;

    uint64_t alloc_cluster_offset;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);

    /* Allocate, if necessary at a given offset in the image file */
    alloc_cluster_offset = start_of_cluster(s, *host_offset);
    ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                  &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Can't extend contiguous allocation */
    if (nb_clusters == 0) {
        *bytes = 0;
        return 0;
    }

    /* !*host_offset would overwrite the image header and is reserved for "no
     * host offset preferred". If 0 was a valid host offset, it'd trigger the
     * following overlap check; do that now to avoid having an invalid value in
     * *host_offset. */
    if (!alloc_cluster_offset) {
        ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
                                            nb_clusters * s->cluster_size);
        assert(ret < 0);
        goto fail;
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the (possibly shortened)
     * write request.
     *
     * avail_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the last newly allocated cluster.
     *
     * nb_sectors: The number of sectors from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    int requested_sectors =
        (*bytes + offset_into_cluster(s, guest_offset))
        >> BDRV_SECTOR_BITS;
    int avail_sectors = nb_clusters
                        << (s->cluster_bits - BDRV_SECTOR_BITS);
    int alloc_n_start = offset_into_cluster(s, guest_offset)
                        >> BDRV_SECTOR_BITS;
    int nb_sectors = MIN(requested_sectors, avail_sectors);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,
        .nb_available   = nb_sectors,

        .cow_start = {
            .offset     = 0,
            .nb_sectors = alloc_n_start,
        },
        .cow_end = {
            .offset     = nb_sectors * BDRV_SECTOR_SIZE,
            .nb_sectors = avail_sectors - nb_sectors,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
                         - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}
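
/*
 * Worked example (illustrative numbers): with 64 KiB clusters (128 sectors),
 * a 64 KiB request starting 5 sectors into a cluster allocates 2 clusters:
 * requested_sectors = 133, avail_sectors = 256, alloc_n_start = 5 and
 * nb_sectors = 133. The resulting COW regions are sectors [0, 5) of the
 * allocation (cow_start) and sectors [133, 256) (cow_end).
 */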

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in the
 * qcow2 file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *host_offset, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *num);

    assert((offset & ~BDRV_SECTOR_MASK) == 0);

again:
    start = offset;
    remaining = (uint64_t)*num << BDRV_SECTOR_BITS;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start += cur_bytes;
        remaining -= cur_bytes;
        cluster_offset += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *    a) Overlap not in the first cluster -> shorten this request and
         *       let the caller handle the rest in its next loop iteration.
         *
         *    b) Real overlaps of two requests. Yield and restart the search
         *       for contiguous clusters (the situation could have changed
         *       while we were sleeping)
         *
         *    c) TODO: Request starts in the same cluster as the in-flight
         *       allocation ends. Shorten the COW of the in-flight allocation,
         *       set cluster_offset to write to the same cluster and set up
         *       the right synchronisation between the in-flight request and
         *       the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *num -= remaining >> BDRV_SECTOR_BITS;
    assert(*num > 0);
    assert(*host_offset != 0);

    return 0;
}
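
/*
 * Informal walk-through (not from the original code): a single call can
 * stitch the request together from several parts, e.g. two COPIED clusters
 * found by handle_copied() followed by a new allocation from handle_alloc()
 * that is physically contiguous with them. Because each part advances
 * cluster_offset, later parts are constrained to continue at exactly that
 * host position; the loop stops early only when a part cannot be made
 * contiguous with what was already gathered (cur_bytes == 0).
 */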

static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK) {
        return -1;
    }
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}
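
/*
 * Note (background, not from the original code): the negative windowBits
 * value (-12) puts zlib into raw deflate mode, i.e. compressed clusters are
 * stored without a zlib header or checksum, matching the raw stream that
 * qcow2 produces at compression time.
 */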

int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}

/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
                             uint64_t nb_clusters, enum qcow2_discard_type type,
                             bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry;

        old_l2_entry = be64_to_cpu(l2_table[l2_index + i]);

        /*
         * If full_discard is false, make sure that a discarded area reads back
         * as zeroes for v3 images (we cannot do it for v2 without actually
         * writing a zero-filled buffer). We can skip the operation if the
         * cluster is already marked as zero, or if it's unallocated and we
         * don't have a backing file.
         *
         * TODO We might want to use bdrv_get_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         *
         * If full_discard is true, the sector should not read back as zeroes,
         * but rather fall through to the backing file.
         */
        switch (qcow2_get_cluster_type(old_l2_entry)) {
        case QCOW2_CLUSTER_UNALLOCATED:
            if (full_discard || !bs->backing_hd) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO:
            if (!full_discard) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_NORMAL:
        case QCOW2_CLUSTER_COMPRESSED:
            break;

        default:
            abort();
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
        if (!full_discard && s->qcow_version >= 3) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
        } else {
            l2_table[l2_index + i] = cpu_to_be64(0);
        }

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);

    return nb_clusters;
}
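
/*
 * Example of the two modes (informal): on a v3 image with a backing file, a
 * guest-initiated discard (full_discard == false) turns an allocated cluster
 * into a zero cluster, so the area keeps reading back as zeroes; a full
 * discard (full_discard == true) clears the entry entirely, so reads fall
 * through to the backing file again.
 */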

int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors, enum qcow2_discard_type type, bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset;
    uint64_t nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset = start_of_cluster(s, end_offset);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    s->cache_discards = true;

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters, type, full_discard);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}

/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
                          uint64_t nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /* Update L2 entries */
        qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
        if (old_offset & QCOW_OFLAG_COMPRESSED) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);

    return nb_clusters;
}

int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t nb_clusters;
    int ret;

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        ret = zero_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
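
/*
 * Note (informal): zero_single_l2() only ORs QCOW_OFLAG_ZERO into normal
 * entries instead of freeing them, so a preallocated cluster keeps its space
 * and can be reused in place by a later write; compressed clusters cannot be
 * zeroed in place and are freed instead.
 */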

/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * l1_entries and *visited_l1_entries are used to keep track of progress for
 * status_cb(). l1_entries contains the total number of L1 entries and
 * *visited_l1_entries counts all visited L1 entries.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, int64_t *visited_l1_entries,
                                      int64_t l1_entries,
                                      BlockDriverAmendStatusCB *status_cb)
{
    BDRVQcow2State *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_table = NULL;
    int ret;
    int i, j;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_table = qemu_try_blockalign(bs->file, s->cluster_size);
        if (l2_table == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        bool l2_dirty = false;
        uint64_t l2_refcount;

        if (!l2_offset) {
            /* unallocated */
            (*visited_l1_entries)++;
            if (status_cb) {
                status_cb(bs, *visited_l1_entries, l1_entries);
            }
            continue;
        }

        if (offset_into_cluster(s, l2_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                    PRIx64 " unaligned (L1 index: %#x)",
                                    l2_offset, i);
            ret = -EIO;
            goto fail;
        }

        if (is_active_l1) {
            /* get active L2 tables from cache */
            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                                  (void **)&l2_table);
        } else {
            /* load inactive L2 tables from disk */
            ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                            (void *)l2_table, s->cluster_sectors);
        }
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &l2_refcount);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            int64_t offset = l2_entry & L2E_OFFSET_MASK;
            int cluster_type = qcow2_get_cluster_type(l2_entry);
            bool preallocated = offset != 0;

            if (cluster_type != QCOW2_CLUSTER_ZERO) {
                continue;
            }

            if (!preallocated) {
                if (!bs->backing_hd) {
                    /* not backed; therefore we can simply deallocate the
                     * cluster */
                    l2_table[j] = 0;
                    l2_dirty = true;
                    continue;
                }

                offset = qcow2_alloc_clusters(bs, s->cluster_size);
                if (offset < 0) {
                    ret = offset;
                    goto fail;
                }

                if (l2_refcount > 1) {
                    /* For shared L2 tables, set the refcount accordingly (it
                     * is already 1 and needs to be l2_refcount) */
                    ret = qcow2_update_cluster_refcount(bs,
                            offset >> s->cluster_bits,
                            refcount_diff(1, l2_refcount), false,
                            QCOW2_DISCARD_OTHER);
                    if (ret < 0) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_OTHER);
                        goto fail;
                    }
                }
            }

            if (offset_into_cluster(s, offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                        "%#" PRIx64 " unaligned (L2 offset: %#"
                                        PRIx64 ", L2 index: %#x)", offset,
                                        l2_offset, j);
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                ret = -EIO;
                goto fail;
            }

            ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            ret = bdrv_write_zeroes(bs->file, offset / BDRV_SECTOR_SIZE,
                                    s->cluster_sectors, 0);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            if (l2_refcount == 1) {
                l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
            } else {
                l2_table[j] = cpu_to_be64(offset);
            }
            l2_dirty = true;
        }

        if (is_active_l1) {
            if (l2_dirty) {
                qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
                qcow2_cache_depends_on_flush(s->l2_table_cache);
            }
            qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
        } else {
            if (l2_dirty) {
                ret = qcow2_pre_write_overlap_check(bs,
                        QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
                        s->cluster_size);
                if (ret < 0) {
                    goto fail;
                }

                ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                                 (void *)l2_table, s->cluster_sectors);
                if (ret < 0) {
                    goto fail;
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries);
        }
    }

    ret = 0;

fail:
    if (l2_table) {
        if (!is_active_l1) {
            qemu_vfree(l2_table);
        } else {
            qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
        }
    }
    return ret;
}

/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs,
                               BlockDriverAmendStatusCB *status_cb)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table = NULL;
    int64_t l1_entries = 0, visited_l1_entries = 0;
    int ret;
    int i, j;

    if (status_cb) {
        l1_entries = s->l1_size;
        for (i = 0; i < s->nb_snapshots; i++) {
            l1_entries += s->snapshots[i].l1_size;
        }
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &visited_l1_entries, l1_entries,
                                     status_cb);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_sectors = (s->snapshots[i].l1_size * sizeof(uint64_t) +
                          BDRV_SECTOR_SIZE - 1) / BDRV_SECTOR_SIZE;

        l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);

        ret = bdrv_read(bs->file, s->snapshots[i].l1_table_offset /
                        BDRV_SECTOR_SIZE, (void *)l1_table, l1_sectors);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &visited_l1_entries, l1_entries,
                                         status_cb);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(l1_table);
    return ret;
}