/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "trace.h"

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size) {
        return 0;
    }

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }
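    /* For illustration: starting from a single entry, the growth rule above
     * yields 1, 2, 3, 5, 8, 12, 18, 27, ..., roughly a factor of 1.5 per
     * step, so reaching any min_size takes O(log min_size) iterations. */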
    if (new_l1_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file,
                                       align_offset(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, align_offset(new_l1_size2, 512));

    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * Loads a L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success (*l2_table then points to the L2 table), or a
 * negative errno value if the read from the image file failed.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
                   uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);

    return ret;
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++)
    {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}
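/*
 * A worked example of the L1/L2 lookup used throughout this file, assuming
 * the default 64 KB clusters (cluster_bits = 16) and thus l2_bits = 13
 * (one L2 table holds 8192 entries):
 *
 *   guest offset 0x250001200:
 *     l1_index          = offset >> (l2_bits + cluster_bits) = offset >> 29 = 18
 *     l2_index          = (offset >> cluster_bits) & (l2_size - 1) = 0x1000
 *     offset in cluster = offset & (cluster_size - 1) = 0x1200
 */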
/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 *
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table = NULL;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        goto fail;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
                              old_l2_offset & L1E_OFFSET_MASK,
                              (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        qcow2_cache_put(bs, s->l2_table_cache, (void **) &old_table);
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_table != NULL) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}
/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster which may require a different handling)
 */
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t stop_flags)
{
    int i;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_table[0]);
    uint64_t offset = first_entry & mask;

    if (!offset) {
        return 0;
    }

    assert(qcow2_get_cluster_type(first_entry) != QCOW2_CLUSTER_COMPRESSED);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}

static int count_contiguous_free_clusters(uint64_t nb_clusters,
                                          uint64_t *l2_table)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != QCOW2_CLUSTER_UNALLOCATED) {
            break;
        }
    }

    return i;
}

/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
int qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                          uint8_t *out_buf, const uint8_t *in_buf,
                          int nb_sectors, bool enc,
                          Error **errp)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;
    int ret;

    for (i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        if (qcrypto_cipher_setiv(s->cipher,
                                 ivec.b, G_N_ELEMENTS(ivec.b),
                                 errp) < 0) {
            return -1;
        }
        if (enc) {
            ret = qcrypto_cipher_encrypt(s->cipher,
                                         in_buf,
                                         out_buf,
                                         512,
                                         errp);
        } else {
            ret = qcrypto_cipher_decrypt(s->cipher,
                                         in_buf,
                                         out_buf,
                                         512,
                                         errp);
        }
        if (ret < 0) {
            return -1;
        }
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
    return 0;
}
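/*
 * Note on the IV scheme above: the 16-byte IV is simply the 512-byte sector
 * number, stored little-endian in the first 8 bytes and padded with zeroes,
 * which is what makes it compatible with cryptoloop. Sector 0 therefore
 * always uses an all-zero IV.
 */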
static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_try_blockalign(bs, iov.iov_len);
    if (iov.iov_base == NULL) {
        return -ENOMEM;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        ret = -ENOMEDIUM;
        goto out;
    }

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (bs->encrypted) {
        Error *err = NULL;
        assert(s->cipher);
        if (qcow2_encrypt_sectors(s, start_sect + n_start,
                                  iov.iov_base, iov.iov_base, n,
                                  true, &err) < 0) {
            ret = -EIO;
            error_free(err);
            goto out;
        }
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + n_start * BDRV_SECTOR_SIZE, n * BDRV_SECTOR_SIZE);
    if (ret < 0) {
        goto out;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}
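/*
 * Note: copy_sectors() works in 512-byte sector units; n_start and n_end are
 * sector indexes relative to the start of the allocated cluster. For example,
 * a caller copying the unmodified head of a cluster passes n_start = 0 and
 * n_end = the first sector touched by the write (see perform_cow() below).
 */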
/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * on entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        if (s->qcow_version < 3) {
            qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                    " in pre-v3 image (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed) {
        nb_available = nb_needed;
    }

    *num = nb_available - index_in_cluster;

    return ret;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
    return ret;
}

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}
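/*
 * Layout of the compressed cluster descriptor assembled below (per the qcow2
 * specification, with x = 62 - (cluster_bits - 8), i.e. x = 54 for the
 * default 64 KB clusters):
 *
 *   bits 0..x-1:  host offset of the first byte of compressed data
 *   bits x..61:   number of additional 512-byte sectors used
 *   bit  62:      QCOW_OFLAG_COMPRESSED
 */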
/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful,
 * Return 0, otherwise.
 *
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return cluster_offset;
}

static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    if (r->nb_sectors == 0) {
        return 0;
    }

    qemu_co_mutex_unlock(&s->lock);
    ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
                       r->offset / BDRV_SECTOR_SIZE,
                       r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
    qemu_co_mutex_lock(&s->lock);

    if (ret < 0) {
        return ret;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    qcow2_cache_depends_on_flush(s->l2_table_cache);

    return 0;
}
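/*
 * Rough sketch: within the newly allocated clusters, the two COW regions of
 * a QCowL2Meta bracket the guest data that will be written:
 *
 *     cluster start                                       cluster end
 *     |<--- cow_start --->|<--- guest write --->|<--- cow_end --->|
 *
 * perform_cow() is called once for each of the two regions.
 */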
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m, &m->cow_start);
    if (ret < 0) {
        goto err;
    }

    ret = perform_cow(bs, m, &m->cow_end);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);

    assert(l2_index + m->nb_clusters <= s->l2_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* if two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes data
         * concurrently. The first one to complete updates the l2 table with a
         * pointer to its cluster; the second one has to do RMW (which is done
         * above by copy_sectors()), update the l2 table with its cluster
         * pointer, and free the old cluster. This is what this loop does */
        if (l2_table[l2_index + i] != 0) {
            old_cluster[j++] = l2_table[l2_index + i];
        }

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }


    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must be copied from the backing file)
 */
static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

        switch (cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if an l2meta already exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}

/*
 * Checks how many clusters that are already allocated and don't require a
 * copy on write exist at the given guest_offset (up to *bytes). If
 * *host_offset is not zero, only physically contiguous clusters beginning at
 * this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *  -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    unsigned int nb_clusters;
    unsigned int keep_clusters;
    int ret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#llx unaligned (guest offset: %#" PRIx64
                                    ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}
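/*
 * Background for the QCOW_OFLAG_COPIED test above: per the qcow2 spec, the
 * copied flag is set exactly when the cluster's refcount is 1, so its data
 * may be overwritten in place; clusters shared with snapshots have the flag
 * cleared and must go through the COW path in handle_alloc() instead.
 */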
/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, unsigned int *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}

/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *  -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    unsigned int nb_clusters;
    int ret;

    uint64_t alloc_cluster_offset;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    /* Allocate, if necessary at a given offset in the image file */
    alloc_cluster_offset = start_of_cluster(s, *host_offset);
    ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                  &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Can't extend contiguous allocation */
    if (nb_clusters == 0) {
        *bytes = 0;
        return 0;
    }

    /* !*host_offset would overwrite the image header and is reserved for "no
     * host offset preferred". If 0 was a valid host offset, it'd trigger the
     * following overlap check; do that now to avoid having an invalid value in
     * *host_offset. */
    if (!alloc_cluster_offset) {
        ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
                                            nb_clusters * s->cluster_size);
        assert(ret < 0);
        goto fail;
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_sectors: Number of sectors from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_sectors: The number of sectors from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    int requested_sectors =
        (*bytes + offset_into_cluster(s, guest_offset))
        >> BDRV_SECTOR_BITS;
    int avail_sectors = nb_clusters
                        << (s->cluster_bits - BDRV_SECTOR_BITS);
    int alloc_n_start = offset_into_cluster(s, guest_offset)
                        >> BDRV_SECTOR_BITS;
    int nb_sectors = MIN(requested_sectors, avail_sectors);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,
        .nb_available   = nb_sectors,

        .cow_start = {
            .offset     = 0,
            .nb_sectors = alloc_n_start,
        },
        .cow_end = {
            .offset     = nb_sectors * BDRV_SECTOR_SIZE,
            .nb_sectors = avail_sectors - nb_sectors,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
                         - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}
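/*
 * A worked example for the COW regions set up above, assuming 64 KB
 * clusters: a 1 KB write to guest offset 0x11200 that allocates one new
 * cluster gives
 *
 *   alloc_n_start     = 0x1200 >> 9           = 9 sectors
 *   requested_sectors = (0x400 + 0x1200) >> 9 = 11 sectors
 *   avail_sectors     = 1 << (16 - 9)         = 128 sectors
 *   nb_sectors        = MIN(11, 128)          = 11 sectors
 *
 * so cow_start covers sectors [0, 9) and cow_end covers sectors [11, 128)
 * of the new cluster.
 */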
/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *host_offset, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *num);

    assert((offset & ~BDRV_SECTOR_MASK) == 0);

again:
    start = offset;
    remaining = (uint64_t)*num << BDRV_SECTOR_BITS;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start           += cur_bytes;
        remaining       -= cur_bytes;
        cluster_offset  += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight
         *         allocation, set cluster_offset to write to the same cluster
         *         and set up the right synchronisation between the in-flight
         *         request and the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *num -= remaining >> BDRV_SECTOR_BITS;
    assert(*num > 0);
    assert(*host_offset != 0);

    return 0;
}

static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK) {
        return -1;
    }
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}
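/*
 * Example for the csize computation above: for a descriptor whose sector
 * count field is 2 and whose coffset ends in 0x123, we read nb_csectors = 3
 * whole sectors starting at coffset >> 9, and the compressed data begins
 * sector_offset = 0x123 bytes into the first of them, so
 * csize = 3 * 512 - 0x123 bytes are handed to decompress_buffer().
 */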
/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters, enum qcow2_discard_type type, bool full_discard)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry;

        old_l2_entry = be64_to_cpu(l2_table[l2_index + i]);

        /*
         * If full_discard is false, make sure that a discarded area reads back
         * as zeroes for v3 images (we cannot do it for v2 without actually
         * writing a zero-filled buffer). We can skip the operation if the
         * cluster is already marked as zero, or if it's unallocated and we
         * don't have a backing file.
         *
         * TODO We might want to use bdrv_get_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         *
         * If full_discard is true, the sector should not read back as zeroes,
         * but rather fall through to the backing file.
         */
        switch (qcow2_get_cluster_type(old_l2_entry)) {
        case QCOW2_CLUSTER_UNALLOCATED:
            if (full_discard || !bs->backing_hd) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO:
            if (!full_discard) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_NORMAL:
        case QCOW2_CLUSTER_COMPRESSED:
            break;

        default:
            abort();
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
        if (!full_discard && s->qcow_version >= 3) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
        } else {
            l2_table[l2_index + i] = cpu_to_be64(0);
        }

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return nb_clusters;
}

int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors, enum qcow2_discard_type type, bool full_discard)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset = start_of_cluster(s, end_offset);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    s->cache_discards = true;

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters, type, full_discard);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
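/*
 * Note that because of the rounding above, a discard request that is not
 * cluster aligned only drops the clusters it covers completely: with 64 KB
 * clusters, a discard of [0x8000, 0x28000) frees just [0x10000, 0x20000).
 */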
/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
                          unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /* Update L2 entries */
        qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
        if (old_offset & QCOW_OFLAG_COMPRESSED) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);

    return nb_clusters;
}

int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int nb_clusters;
    int ret;

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        ret = zero_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
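/*
 * Background for the code below: QCOW_OFLAG_ZERO is bit 0 of an L2 entry and
 * only exists in v3 images; a standard cluster with the flag set reads as
 * zeroes regardless of its host offset, which is what allows preallocated
 * zero clusters to keep their backing storage.
 */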
/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * l1_entries and *visited_l1_entries are used to keep track of progress for
 * status_cb(). l1_entries contains the total number of L1 entries and
 * *visited_l1_entries counts all visited L1 entries.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, int64_t *visited_l1_entries,
                                      int64_t l1_entries,
                                      BlockDriverAmendStatusCB *status_cb)
{
    BDRVQcowState *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_table = NULL;
    int ret;
    int i, j;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_table = qemu_try_blockalign(bs->file, s->cluster_size);
        if (l2_table == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        bool l2_dirty = false;
        uint64_t l2_refcount;

        if (!l2_offset) {
            /* unallocated */
            (*visited_l1_entries)++;
            if (status_cb) {
                status_cb(bs, *visited_l1_entries, l1_entries);
            }
            continue;
        }

        if (offset_into_cluster(s, l2_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                    PRIx64 " unaligned (L1 index: %#x)",
                                    l2_offset, i);
            ret = -EIO;
            goto fail;
        }

        if (is_active_l1) {
            /* get active L2 tables from cache */
            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                    (void **)&l2_table);
        } else {
            /* load inactive L2 tables from disk */
            ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                    (void *)l2_table, s->cluster_sectors);
        }
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &l2_refcount);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            int64_t offset = l2_entry & L2E_OFFSET_MASK;
            int cluster_type = qcow2_get_cluster_type(l2_entry);
            bool preallocated = offset != 0;

            if (cluster_type != QCOW2_CLUSTER_ZERO) {
                continue;
            }

            if (!preallocated) {
                if (!bs->backing_hd) {
                    /* not backed; therefore we can simply deallocate the
                     * cluster */
                    l2_table[j] = 0;
                    l2_dirty = true;
                    continue;
                }

                offset = qcow2_alloc_clusters(bs, s->cluster_size);
                if (offset < 0) {
                    ret = offset;
                    goto fail;
                }

                if (l2_refcount > 1) {
                    /* For shared L2 tables, set the refcount accordingly (it is
                     * already 1 and needs to be l2_refcount) */
                    ret = qcow2_update_cluster_refcount(bs,
                            offset >> s->cluster_bits,
                            refcount_diff(1, l2_refcount), false,
                            QCOW2_DISCARD_OTHER);
                    if (ret < 0) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_OTHER);
                        goto fail;
                    }
                }
            }

            if (offset_into_cluster(s, offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                        "%#" PRIx64 " unaligned (L2 offset: %#"
                                        PRIx64 ", L2 index: %#x)", offset,
                                        l2_offset, j);
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                ret = -EIO;
                goto fail;
            }

            ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            ret = bdrv_write_zeroes(bs->file, offset / BDRV_SECTOR_SIZE,
                                    s->cluster_sectors, 0);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            if (l2_refcount == 1) {
                l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
            } else {
                l2_table[j] = cpu_to_be64(offset);
            }
            l2_dirty = true;
        }

        if (is_active_l1) {
            if (l2_dirty) {
                qcow2_cache_entry_mark_dirty(bs, s->l2_table_cache, l2_table);
                qcow2_cache_depends_on_flush(s->l2_table_cache);
            }
            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        } else {
            if (l2_dirty) {
                ret = qcow2_pre_write_overlap_check(bs,
                        QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
                        s->cluster_size);
                if (ret < 0) {
                    goto fail;
                }

                ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                        (void *)l2_table, s->cluster_sectors);
                if (ret < 0) {
                    goto fail;
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries);
        }
    }

    ret = 0;

fail:
    if (l2_table) {
        if (!is_active_l1) {
            qemu_vfree(l2_table);
        } else {
            qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
        }
    }
    return ret;
}

/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs,
                               BlockDriverAmendStatusCB *status_cb)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table = NULL;
    int64_t l1_entries = 0, visited_l1_entries = 0;
    int ret;
    int i, j;

    if (status_cb) {
        l1_entries = s->l1_size;
        for (i = 0; i < s->nb_snapshots; i++) {
            l1_entries += s->snapshots[i].l1_size;
        }
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &visited_l1_entries, l1_entries,
                                     status_cb);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_sectors = (s->snapshots[i].l1_size * sizeof(uint64_t) +
                BDRV_SECTOR_SIZE - 1) / BDRV_SECTOR_SIZE;

        l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);

        ret = bdrv_read(bs->file, s->snapshots[i].l1_table_offset /
                BDRV_SECTOR_SIZE, (void *)l1_table, l1_sectors);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &visited_l1_entries, l1_entries,
                                         status_cb);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(l1_table);
    return ret;
}