/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <zlib.h>

#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "trace.h"

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcow2State *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size) {
        return 0;
    }

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

    if (new_l1_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file->bs,
                                       align_offset(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, align_offset(new_l1_size2, 512));

    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file->bs, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file->bs, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
 fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}
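/*
 * For illustration: the growth loop above multiplies the table size by
 * roughly 1.5 per iteration. Growing an L1 table of 32 entries to reach
 * min_size = 70 proceeds 32 -> (32 * 3 + 1) / 2 = 48 -> (48 * 3 + 1) / 2
 * = 72, so 72 entries are allocated rather than exactly 70, which
 * amortizes the cost of repeated growth over future resizes.
 */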
/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success and -errno on failure; on success, *l2_table points
 * to the L2 table in the cache.
 */
static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
                   uint64_t **l2_table)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void **) l2_table);

    return ret;
}

/*
 * Writes one sector of the L1 table to the disk (we can't update single
 * entries, and we really don't want the write to turn into a
 * read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++)
    {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file->bs,
                           s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}
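/*
 * For illustration: with L1_ENTRIES_PER_SECTOR == 512 / 8 == 64, updating
 * l1_index == 100 in qcow2_write_l1_entry() rounds down to
 * l1_start_index == (100 & ~63) == 64, so the single 512-byte sector
 * holding the big-endian copies of entries 64..127 is rewritten in one
 * bdrv_pwrite_sync() call.
 */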
/*
 * l2_allocate
 *
 * Allocates a new L2 table for the given L1 index. If the L1 entry already
 * points to an L2 table (i.e. we are doing a copy on write for the L2 table),
 * copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 */
static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table = NULL;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 table */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset,
                                (void **) table);
    if (ret < 0) {
        goto fail;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t *old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
                              old_l2_offset & L1E_OFFSET_MASK,
                              (void **) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        qcow2_cache_put(bs, s->l2_table_cache, (void **) &old_table);
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_table != NULL) {
        qcow2_cache_put(bs, s->l2_table_cache, (void **) table);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}
/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster which may require different handling.)
 */
static int count_contiguous_clusters(int nb_clusters, int cluster_size,
                                     uint64_t *l2_table, uint64_t stop_flags)
{
    int i;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_table[0]);
    uint64_t offset = first_entry & mask;

    if (!offset) {
        return 0;
    }

    assert(qcow2_get_cluster_type(first_entry) == QCOW2_CLUSTER_NORMAL);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}

static int count_contiguous_clusters_by_type(int nb_clusters,
                                             uint64_t *l2_table,
                                             int wanted_type)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != wanted_type) {
            break;
        }
    }

    return i;
}
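/*
 * For illustration (assuming a 64 KiB cluster size): three L2 entries
 * pointing to host offsets 0x50000, 0x60000 and 0x80000 make
 * count_contiguous_clusters() return 2, because the third entry fails the
 * check 0x50000 + 2 * 0x10000 == 0x80000. A compressed entry stops the run
 * immediately, since QCOW_OFLAG_COMPRESSED is part of the comparison mask.
 */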
/* The crypt function is compatible with the Linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num,
                          uint8_t *out_buf, const uint8_t *in_buf,
                          int nb_sectors, bool enc,
                          Error **errp)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;
    int ret;

    for (i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        if (qcrypto_cipher_setiv(s->cipher,
                                 ivec.b, G_N_ELEMENTS(ivec.b),
                                 errp) < 0) {
            return -1;
        }
        if (enc) {
            ret = qcrypto_cipher_encrypt(s->cipher,
                                         in_buf,
                                         out_buf,
                                         512,
                                         errp);
        } else {
            ret = qcrypto_cipher_decrypt(s->cipher,
                                         in_buf,
                                         out_buf,
                                         512,
                                         errp);
        }
        if (ret < 0) {
            return -1;
        }
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
    return 0;
}
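/*
 * For illustration: the IV built above is the 64-bit sector number in
 * little-endian byte order, zero-padded to 16 bytes. For sector 1 the IV
 * bytes are 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00, which is what
 * makes the scheme compatible with the Linux cryptoloop algorithm referenced
 * in the comment above.
 */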
static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcow2State *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_try_blockalign(bs, iov.iov_len);
    if (iov.iov_base == NULL) {
        return -ENOMEM;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        ret = -ENOMEDIUM;
        goto out;
    }

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (bs->encrypted) {
        Error *err = NULL;
        assert(s->cipher);
        if (qcow2_encrypt_sectors(s, start_sect + n_start,
                                  iov.iov_base, iov.iov_base, n,
                                  true, &err) < 0) {
            ret = -EIO;
            error_free(err);
            goto out;
        }
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + n_start * BDRV_SECTOR_SIZE, n * BDRV_SECTOR_SIZE);
    if (ret < 0) {
        goto out;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file->bs, (cluster_offset >> 9) + n_start, n,
                         &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}


/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in the qcow2
 * file. The offset is stored in *cluster_offset.
 *
 * On entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * On exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             int *num, uint64_t *cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }
    assert(nb_needed <= INT_MAX);

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* nb_needed <= INT_MAX, thus nb_clusters <= INT_MAX, too */
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        if (s->qcow_version < 3) {
            qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                    " in pre-v3 image (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        c = count_contiguous_clusters_by_type(nb_clusters, &l2_table[l2_index],
                                              QCOW2_CLUSTER_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_clusters_by_type(nb_clusters, &l2_table[l2_index],
                                              QCOW2_CLUSTER_UNALLOCATED);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed) {
        nb_available = nb_needed;
    }

    *num = nb_available - index_in_cluster;

    return ret;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
    return ret;
}
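/*
 * Indexing example for the lookup above (assuming the default 64 KiB
 * clusters): cluster_bits = 16 and each L2 table holds
 * cluster_size / sizeof(uint64_t) = 8192 entries, so l2_bits = 13 and
 * l1_bits = 29. For guest offset 0x23456789:
 *
 *   l1_index        = 0x23456789 >> 29          = 1
 *   l2_index        = (0x23456789 >> 16) & 8191 = 0x345
 *   byte in cluster = 0x23456789 & 0xffff       = 0x6789
 */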
/*
 * get_cluster_table
 *
 * For a given disk offset, load (and allocate if needed)
 * the L2 table.
 *
 * The L2 table offset in the qcow2 file and the cluster index
 * in the L2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file. If the offset is not found, allocate a new compressed cluster.
 *
 * Returns the cluster offset if successful; returns 0 otherwise.
 */
uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return cluster_offset;
}
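/*
 * Note on nb_csectors above, by way of example: the subtraction yields the
 * number of 512-byte host sectors the compressed data spans beyond its
 * first sector. E.g. 5000 bytes at host byte offset 0x31300 (sector 393,
 * byte 256) end inside sector 403, touching 11 sectors in total, and the
 * formula gives 403 - 393 = 10; qcow2_decompress_cluster() adds the 1 back
 * when it reads the data.
 */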
static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    if (r->nb_sectors == 0) {
        return 0;
    }

    qemu_co_mutex_unlock(&s->lock);
    ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
                       r->offset / BDRV_SECTOR_SIZE,
                       r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
    qemu_co_mutex_lock(&s->lock);

    if (ret < 0) {
        return ret;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    qcow2_cache_depends_on_flush(s->l2_table_cache);

    return 0;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcow2State *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m, &m->cow_start);
    if (ret < 0) {
        goto err;
    }

    ret = perform_cow(bs, m, &m->cow_end);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);

    assert(l2_index + m->nb_clusters <= s->l2_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by copy_sectors()), update the L2 table with its
         * cluster pointer, and free the old cluster. This is what this loop
         * does. */
        if (l2_table[l2_index + i] != 0) {
            old_cluster[j++] = l2_table[l2_index + i];
        }

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }


    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must be copied from the backing file)
 */
static int count_cow_clusters(BDRVQcow2State *s, int nb_clusters,
                              uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

        switch (cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}
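/*
 * COW-region example (assuming 64 KiB clusters, i.e. 128 sectors per
 * cluster): a write covering sectors 10..19 of a newly allocated cluster
 * gets cow_start = sectors 0..9 and cow_end = sectors 20..127 of that
 * cluster. perform_cow() copies both ranges from the old location (or the
 * backing file) before qcow2_alloc_cluster_link_l2() points the L2 entry
 * at the new cluster.
 */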
/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
                               uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if an l2meta already exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}
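/*
 * For illustration: if an in-flight allocation covers guest bytes
 * [64k, 192k) and a new request asks for [0, 128k), the code above shortens
 * the request to bytes = 64k so it stops exactly at the start of the
 * running allocation. A request starting inside [64k, 192k) gets bytes = 0
 * and either returns (if it already gathered an l2meta) or waits on
 * dependent_requests and restarts with -EAGAIN.
 */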
/*
 * Checks how many clusters that are already allocated and don't require a
 * copy on write exist at the given guest_offset (up to *bytes). If
 * *host_offset is not zero, only physically contiguous clusters beginning at
 * this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *  -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    uint64_t nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#llx unaligned (guest offset: %#" PRIx64
                                    ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}
/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
                                   uint64_t *host_offset, uint64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int64_t ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}
/*
 * Allocates new clusters for an area that is either yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *  -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    uint64_t nb_clusters;
    int ret;

    uint64_t alloc_cluster_offset;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /* Allocate, if necessary at a given offset in the image file */
    alloc_cluster_offset = start_of_cluster(s, *host_offset);
    ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                  &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Can't extend contiguous allocation */
    if (nb_clusters == 0) {
        *bytes = 0;
        return 0;
    }

    /* !*host_offset would overwrite the image header and is reserved for "no
     * host offset preferred". If 0 was a valid host offset, it'd trigger the
     * following overlap check; do that now to avoid having an invalid value in
     * *host_offset. */
    if (!alloc_cluster_offset) {
        ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
                                            nb_clusters * s->cluster_size);
        assert(ret < 0);
        goto fail;
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_sectors: Number of sectors from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_sectors: The number of sectors from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    int requested_sectors =
        (*bytes + offset_into_cluster(s, guest_offset))
        >> BDRV_SECTOR_BITS;
    int avail_sectors = nb_clusters
                        << (s->cluster_bits - BDRV_SECTOR_BITS);
    int alloc_n_start = offset_into_cluster(s, guest_offset)
                        >> BDRV_SECTOR_BITS;
    int nb_sectors = MIN(requested_sectors, avail_sectors);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,
        .nb_available   = nb_sectors,

        .cow_start = {
            .offset     = 0,
            .nb_sectors = alloc_n_start,
        },
        .cow_end = {
            .offset     = nb_sectors * BDRV_SECTOR_SIZE,
            .nb_sectors = avail_sectors - nb_sectors,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
                         - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}
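/*
 * Arithmetic example for the bookkeeping above (assuming 64 KiB clusters,
 * i.e. 128 sectors per cluster): a request starting 4 KiB into its cluster
 * with *bytes = 100 KiB gives requested_sectors = 104 KiB / 512 = 208 and
 * alloc_n_start = 8. With nb_clusters = 2, avail_sectors = 256 and
 * nb_sectors = MIN(208, 256) = 208, so cow_end covers the trailing
 * 256 - 208 = 48 sectors of the second cluster.
 */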
/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in the qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * the other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
                               int *num, uint64_t *host_offset, QCowL2Meta **m)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *num);

    assert((offset & ~BDRV_SECTOR_MASK) == 0);

again:
    start = offset;
    remaining = (uint64_t)*num << BDRV_SECTOR_BITS;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start += cur_bytes;
        remaining -= cur_bytes;
        cluster_offset += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight
         *         allocation, set cluster_offset to write to the same cluster
         *         and set up the right synchronisation between the in-flight
         *         request and the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *num -= remaining >> BDRV_SECTOR_BITS;
    assert(*num > 0);
    assert(*host_offset != 0);

    return 0;
}
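/*
 * Walkthrough (illustrative): for a 256 KiB write whose first 64 KiB
 * cluster is already allocated with QCOW_OFLAG_COPIED and whose remainder
 * is unallocated, the loop above consumes the first 64 KiB through
 * handle_copied() (step 2), then allocates the remaining 192 KiB through
 * handle_alloc() (step 3), leaving one QCowL2Meta in *m that describes the
 * newly allocated clusters for qcow2_alloc_cluster_link_l2().
 */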
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK) {
        return -1;
    }
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file->bs, coffset >> 9, s->cluster_data,
                        nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}
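/*
 * Note on the zlib parameters above: windowBits == -12 selects raw deflate
 * data (no zlib header or checksum) with a 4 KiB window, matching how the
 * qcow2 driver compresses clusters. Z_BUF_ERROR is accepted because the
 * input is rounded up to whole host sectors, so inflate() may stop with a
 * full output buffer before consuming all input; the out_len check still
 * guarantees a complete cluster was produced.
 */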
/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
                             uint64_t nb_clusters, enum qcow2_discard_type type,
                             bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry;

        old_l2_entry = be64_to_cpu(l2_table[l2_index + i]);

        /*
         * If full_discard is false, make sure that a discarded area reads back
         * as zeroes for v3 images (we cannot do it for v2 without actually
         * writing a zero-filled buffer). We can skip the operation if the
         * cluster is already marked as zero, or if it's unallocated and we
         * don't have a backing file.
         *
         * TODO We might want to use bdrv_get_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         *
         * If full_discard is true, the sector should not read back as zeroes,
         * but rather fall through to the backing file.
         */
        switch (qcow2_get_cluster_type(old_l2_entry)) {
        case QCOW2_CLUSTER_UNALLOCATED:
            if (full_discard || !bs->backing) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO:
            if (!full_discard) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_NORMAL:
        case QCOW2_CLUSTER_COMPRESSED:
            break;

        default:
            abort();
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
        if (!full_discard && s->qcow_version >= 3) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
        } else {
            l2_table[l2_index + i] = cpu_to_be64(0);
        }

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return nb_clusters;
}

int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors, enum qcow2_discard_type type, bool full_discard)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t end_offset;
    uint64_t nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset = start_of_cluster(s, end_offset);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    s->cache_discards = true;

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters, type, full_discard);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
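/*
 * Rounding example (assuming 64 KiB clusters): a discard request for bytes
 * [70 KiB, 200 KiB) is narrowed to the whole clusters in [128 KiB, 192 KiB),
 * i.e. a single cluster. The partial clusters at both ends are left alone
 * because discarding them would throw away live guest data.
 */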
/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
                          uint64_t nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
    assert(nb_clusters <= INT_MAX);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /* Update L2 entries */
        qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
        if (old_offset & QCOW_OFLAG_COMPRESSED) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return nb_clusters;
}

int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t nb_clusters;
    int ret;

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        ret = zero_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * l1_entries and *visited_l1_entries are used to keep track of progress for
 * status_cb(). l1_entries contains the total number of L1 entries and
 * *visited_l1_entries counts all visited L1 entries.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, int64_t *visited_l1_entries,
                                      int64_t l1_entries,
                                      BlockDriverAmendStatusCB *status_cb,
                                      void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_table = NULL;
    int ret;
    int i, j;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_table = qemu_try_blockalign(bs->file->bs, s->cluster_size);
        if (l2_table == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        bool l2_dirty = false;
        uint64_t l2_refcount;

        if (!l2_offset) {
            /* unallocated */
            (*visited_l1_entries)++;
            if (status_cb) {
                status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
            }
            continue;
        }

        if (offset_into_cluster(s, l2_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                    PRIx64 " unaligned (L1 index: %#x)",
                                    l2_offset, i);
            ret = -EIO;
            goto fail;
        }

        if (is_active_l1) {
            /* get active L2 tables from cache */
            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                                  (void **)&l2_table);
        } else {
            /* load inactive L2 tables from disk */
            ret = bdrv_read(bs->file->bs, l2_offset / BDRV_SECTOR_SIZE,
                            (void *)l2_table, s->cluster_sectors);
        }
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &l2_refcount);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            int64_t offset = l2_entry & L2E_OFFSET_MASK;
            int cluster_type = qcow2_get_cluster_type(l2_entry);
            bool preallocated = offset != 0;

            if (cluster_type != QCOW2_CLUSTER_ZERO) {
                continue;
            }

            if (!preallocated) {
                if (!bs->backing) {
                    /* not backed; therefore we can simply deallocate the
                     * cluster */
                    l2_table[j] = 0;
                    l2_dirty = true;
                    continue;
                }

                offset = qcow2_alloc_clusters(bs, s->cluster_size);
                if (offset < 0) {
                    ret = offset;
                    goto fail;
                }

                if (l2_refcount > 1) {
                    /* For shared L2 tables, set the refcount accordingly (it
                     * is already 1 and needs to be l2_refcount) */
                    ret = qcow2_update_cluster_refcount(bs,
                            offset >> s->cluster_bits,
                            refcount_diff(1, l2_refcount), false,
                            QCOW2_DISCARD_OTHER);
                    if (ret < 0) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_OTHER);
                        goto fail;
                    }
                }
            }

            if (offset_into_cluster(s, offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                        "%#" PRIx64 " unaligned (L2 offset: %#"
                                        PRIx64 ", L2 index: %#x)", offset,
                                        l2_offset, j);
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                ret = -EIO;
                goto fail;
            }

            ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            ret = bdrv_write_zeroes(bs->file->bs, offset / BDRV_SECTOR_SIZE,
                                    s->cluster_sectors, 0);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            if (l2_refcount == 1) {
                l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
            } else {
                l2_table[j] = cpu_to_be64(offset);
            }
            l2_dirty = true;
        }

        if (is_active_l1) {
            if (l2_dirty) {
                qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
                qcow2_cache_depends_on_flush(s->l2_table_cache);
            }
            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        } else {
            if (l2_dirty) {
                ret = qcow2_pre_write_overlap_check(bs,
                        QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
                        s->cluster_size);
                if (ret < 0) {
                    goto fail;
                }

                ret = bdrv_write(bs->file->bs, l2_offset / BDRV_SECTOR_SIZE,
                                 (void *)l2_table, s->cluster_sectors);
                if (ret < 0) {
                    goto fail;
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries, cb_opaque);
        }
    }

    ret = 0;

fail:
    if (l2_table) {
        if (!is_active_l1) {
            qemu_vfree(l2_table);
        } else {
            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        }
    }
    return ret;
}

/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs,
                               BlockDriverAmendStatusCB *status_cb,
                               void *cb_opaque)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table = NULL;
    int64_t l1_entries = 0, visited_l1_entries = 0;
    int ret;
    int i, j;

    if (status_cb) {
        l1_entries = s->l1_size;
        for (i = 0; i < s->nb_snapshots; i++) {
            l1_entries += s->snapshots[i].l1_size;
        }
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &visited_l1_entries, l1_entries,
                                     status_cb, cb_opaque);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_sectors = (s->snapshots[i].l1_size * sizeof(uint64_t) +
                          BDRV_SECTOR_SIZE - 1) / BDRV_SECTOR_SIZE;

        l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);

        ret = bdrv_read(bs->file->bs,
                        s->snapshots[i].l1_table_offset / BDRV_SECTOR_SIZE,
                        (void *)l1_table, l1_sectors);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &visited_l1_entries, l1_entries,
                                         status_cb, cb_opaque);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(l1_table);
    return ret;
}