/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "trace.h"

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

    if (new_l1_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file,
                                       align_offset(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, align_offset(new_l1_size2, 512));

    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for (i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * Loads a L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success, -errno if the read from the image file failed.
 * On success, *l2_table points to the L2 table.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
                   uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);

    return ret;
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}
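
/*
 * Editor's illustration (not part of the original driver): with 512-byte
 * sectors and 8-byte big-endian L1 entries, L1_ENTRIES_PER_SECTOR is 64.
 * For l1_index == 100 the function above therefore rewrites entries 64..127:
 *
 *     l1_start_index = 100 & ~63 = 64
 *     file offset    = s->l1_table_offset + 8 * 64
 *
 * i.e. one aligned 512-byte write replaces the whole sector containing the
 * updated entry.
 */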

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L2 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table = NULL;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        goto fail;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
                              old_l2_offset & L1E_OFFSET_MASK,
                              (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_table != NULL) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}
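
/*
 * Editor's illustration (not part of the original driver): l2_allocate()
 * is reached when the L1 entry lacks QCOW_OFLAG_COPIED, i.e. the L2 table
 * may still be shared (e.g. with a snapshot). Conceptually:
 *
 *     before: l1_table[i] = old_l2_offset                     (possibly shared)
 *     after:  l1_table[i] = new_l2_offset | QCOW_OFLAG_COPIED (private copy)
 *
 * so later writes may update the new table in place.
 */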

/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster which may require a different handling)
 */
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t stop_flags)
{
    int i;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_table[0]);
    uint64_t offset = first_entry & mask;

    if (!offset)
        return 0;

    assert(qcow2_get_cluster_type(first_entry) != QCOW2_CLUSTER_COMPRESSED);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}

static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != QCOW2_CLUSTER_UNALLOCATED) {
            break;
        }
    }

    return i;
}
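
/*
 * Editor's illustration (not part of the original driver): with
 * stop_flags == QCOW_OFLAG_ZERO the comparison mask becomes
 * QCOW_OFLAG_ZERO | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED. Given 64 KiB
 * clusters and the entries 0x50000, 0x60000, 0x80000, the function returns
 * 2: the run stops at the third entry because 0x50000 + 2 * 0x10000 is
 * 0x70000, not 0x80000.
 */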

/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for (i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}

static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_try_blockalign(bs, iov.iov_len);
    if (iov.iov_base == NULL) {
        return -ENOMEM;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        ret = -ENOMEDIUM;
        goto out;
    }

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                              iov.iov_base, iov.iov_base, n, 1,
                              &s->aes_encrypt_key);
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + n_start * BDRV_SECTOR_SIZE, n * BDRV_SECTOR_SIZE);
    if (ret < 0) {
        goto out;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}


/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * on entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        if (s->qcow_version < 3) {
            qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                    " in pre-v3 image (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    *num = nb_available - index_in_cluster;

    return ret;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
    return ret;
}

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}
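
/*
 * Editor's illustration (not part of the original driver): with the default
 * 64 KiB clusters (cluster_bits == 16) an L2 table holds 8192 entries
 * (l2_bits == 13), so for a guest offset of 0x12340000:
 *
 *     l1_index = 0x12340000 >> (13 + 16)       = 0
 *     l2_index = (0x12340000 >> 16) & (8192-1) = 0x1234
 *
 * i.e. entry 0x1234 of the first L2 table.
 */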

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful; return 0 otherwise.
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}

static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    if (r->nb_sectors == 0) {
        return 0;
    }

    qemu_co_mutex_unlock(&s->lock);
    ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
                       r->offset / BDRV_SECTOR_SIZE,
                       r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
    qemu_co_mutex_lock(&s->lock);

    if (ret < 0) {
        return ret;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    qcow2_cache_depends_on_flush(s->l2_table_cache);

    return 0;
}
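
/*
 * Editor's illustration (not part of the original driver): for a write
 * covering bytes 0x1000..0x3000 of a single freshly allocated 64 KiB
 * cluster, perform_cow() is called twice: once for the head region
 * [0x0000, 0x1000) and once for the tail region [0x3000, 0x10000), both
 * copied from the old data (or the backing file); the guest data in
 * between is written by the caller itself.
 */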

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m, &m->cow_start);
    if (ret < 0) {
        goto err;
    }

    ret = perform_cow(bs, m, &m->cow_end);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    assert(l2_index + m->nb_clusters <= s->l2_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* if two concurrent writes happen to the same unallocated cluster
         * each write allocates a separate cluster and writes data
         * concurrently. The first one to complete updates the l2 table with a
         * pointer to its cluster; the second one has to do RMW (which is done
         * above by copy_sectors()), update the l2 table with its cluster
         * pointer and free the old cluster. This is what this loop does */
        if (l2_table[l2_index + i] != 0)
            old_cluster[j++] = l2_table[l2_index + i];

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }


    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must copy from the backing file)
 */
static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

        switch (cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}
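
/*
 * Editor's illustration (not part of the original driver): if a request
 * covers bytes [0x10000, 0x30000) and an in-flight allocation COWs
 * [0x20000, 0x28000), the loop above shortens the request to
 * bytes = 0x20000 - 0x10000 = 0x10000; the caller completes the
 * non-overlapping prefix first and picks up the remainder once the
 * dependency has finished.
 */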

/*
 * Checks how many clusters that are already allocated and don't require a
 * copy on write are present at the given guest_offset (up to *bytes). If
 * *host_offset is not zero, only physically contiguous clusters beginning at
 * this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *   -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    unsigned int nb_clusters;
    unsigned int keep_clusters;
    int ret, pret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#llx unaligned (guest offset: %#" PRIx64
                                    ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    pret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (pret < 0) {
        return pret;
    }

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}
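
/*
 * Editor's illustration (not part of the original driver): if guest_offset
 * points 0x123 bytes into a COPIED normal cluster whose host cluster starts
 * at 0x50000, handle_copied() returns 1 with *host_offset == 0x50123 and
 * *bytes capped at the end of the contiguous run of COPIED clusters.
 */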

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, unsigned int *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}
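
/*
 * Editor's illustration (not part of the original driver): with
 * *host_offset == 0, qcow2_alloc_clusters() is free to pick any host
 * offset; with *host_offset == 0x50000 the run must start exactly there,
 * and qcow2_alloc_clusters_at() may return fewer clusters than requested
 * (down to 0) if that area is already partially in use.
 */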

/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *   -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    unsigned int nb_clusters;
    int ret;

    uint64_t alloc_cluster_offset;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* Allocate, if necessary at a given offset in the image file */
    alloc_cluster_offset = start_of_cluster(s, *host_offset);
    ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                  &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Can't extend contiguous allocation */
    if (nb_clusters == 0) {
        *bytes = 0;
        return 0;
    }

    /* !*host_offset would overwrite the image header and is reserved for "no
     * host offset preferred". If 0 was a valid host offset, it'd trigger the
     * following overlap check; do that now to avoid having an invalid value in
     * *host_offset. */
    if (!alloc_cluster_offset) {
        ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
                                            nb_clusters * s->cluster_size);
        assert(ret < 0);
        goto fail;
    }

    /*
     * Save info needed for metadata update.
     *
     * requested_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_sectors: Number of sectors from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_sectors: The number of sectors from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    int requested_sectors =
        (*bytes + offset_into_cluster(s, guest_offset))
        >> BDRV_SECTOR_BITS;
    int avail_sectors = nb_clusters
                        << (s->cluster_bits - BDRV_SECTOR_BITS);
    int alloc_n_start = offset_into_cluster(s, guest_offset)
                        >> BDRV_SECTOR_BITS;
    int nb_sectors = MIN(requested_sectors, avail_sectors);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,
        .nb_available   = nb_sectors,

        .cow_start = {
            .offset     = 0,
            .nb_sectors = alloc_n_start,
        },
        .cow_end = {
            .offset     = nb_sectors * BDRV_SECTOR_SIZE,
            .nb_sectors = avail_sectors - nb_sectors,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
                         - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}
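
/*
 * Editor's illustration (not part of the original driver): a 0x1800-byte
 * write at guest offset 0x10200 (0x200 bytes into one new 64 KiB cluster)
 * yields
 *
 *     requested_sectors = (0x1800 + 0x200) >> 9 = 13
 *     avail_sectors     = 1 << (16 - 9)         = 128
 *     cow_start         = sector 0, 1 sector          (the 0x200-byte head)
 *     cow_end           = from sector 13, 115 sectors (the cluster's tail)
 */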

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *host_offset, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *num);

    assert((offset & ~BDRV_SECTOR_MASK) == 0);

again:
    start = offset;
    remaining = *num << BDRV_SECTOR_BITS;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start += cur_bytes;
        remaining -= cur_bytes;
        cluster_offset += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight
         *         allocation, set cluster_offset to write to the same cluster
         *         and set up the right synchronisation between the in-flight
         *         request and the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
1338 */ 1339 ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m); 1340 if (ret < 0) { 1341 return ret; 1342 } else if (ret) { 1343 continue; 1344 } else { 1345 assert(cur_bytes == 0); 1346 break; 1347 } 1348 } 1349 1350 *num -= remaining >> BDRV_SECTOR_BITS; 1351 assert(*num > 0); 1352 assert(*host_offset != 0); 1353 1354 return 0; 1355 } 1356 1357 static int decompress_buffer(uint8_t *out_buf, int out_buf_size, 1358 const uint8_t *buf, int buf_size) 1359 { 1360 z_stream strm1, *strm = &strm1; 1361 int ret, out_len; 1362 1363 memset(strm, 0, sizeof(*strm)); 1364 1365 strm->next_in = (uint8_t *)buf; 1366 strm->avail_in = buf_size; 1367 strm->next_out = out_buf; 1368 strm->avail_out = out_buf_size; 1369 1370 ret = inflateInit2(strm, -12); 1371 if (ret != Z_OK) 1372 return -1; 1373 ret = inflate(strm, Z_FINISH); 1374 out_len = strm->next_out - out_buf; 1375 if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) || 1376 out_len != out_buf_size) { 1377 inflateEnd(strm); 1378 return -1; 1379 } 1380 inflateEnd(strm); 1381 return 0; 1382 } 1383 1384 int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset) 1385 { 1386 BDRVQcowState *s = bs->opaque; 1387 int ret, csize, nb_csectors, sector_offset; 1388 uint64_t coffset; 1389 1390 coffset = cluster_offset & s->cluster_offset_mask; 1391 if (s->cluster_cache_offset != coffset) { 1392 nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1; 1393 sector_offset = coffset & 511; 1394 csize = nb_csectors * 512 - sector_offset; 1395 BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED); 1396 ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors); 1397 if (ret < 0) { 1398 return ret; 1399 } 1400 if (decompress_buffer(s->cluster_cache, s->cluster_size, 1401 s->cluster_data + sector_offset, csize) < 0) { 1402 return -EIO; 1403 } 1404 s->cluster_cache_offset = coffset; 1405 } 1406 return 0; 1407 } 1408 1409 /* 1410 * This discards as many clusters of nb_clusters as possible at once (i.e. 1411 * all clusters in the same L2 table) and returns the number of discarded 1412 * clusters. 1413 */ 1414 static int discard_single_l2(BlockDriverState *bs, uint64_t offset, 1415 unsigned int nb_clusters, enum qcow2_discard_type type) 1416 { 1417 BDRVQcowState *s = bs->opaque; 1418 uint64_t *l2_table; 1419 int l2_index; 1420 int ret; 1421 int i; 1422 1423 ret = get_cluster_table(bs, offset, &l2_table, &l2_index); 1424 if (ret < 0) { 1425 return ret; 1426 } 1427 1428 /* Limit nb_clusters to one L2 table */ 1429 nb_clusters = MIN(nb_clusters, s->l2_size - l2_index); 1430 1431 for (i = 0; i < nb_clusters; i++) { 1432 uint64_t old_l2_entry; 1433 1434 old_l2_entry = be64_to_cpu(l2_table[l2_index + i]); 1435 1436 /* 1437 * Make sure that a discarded area reads back as zeroes for v3 images 1438 * (we cannot do it for v2 without actually writing a zero-filled 1439 * buffer). We can skip the operation if the cluster is already marked 1440 * as zero, or if it's unallocated and we don't have a backing file. 1441 * 1442 * TODO We might want to use bdrv_get_block_status(bs) here, but we're 1443 * holding s->lock, so that doesn't work today. 
1444 */ 1445 switch (qcow2_get_cluster_type(old_l2_entry)) { 1446 case QCOW2_CLUSTER_UNALLOCATED: 1447 if (!bs->backing_hd) { 1448 continue; 1449 } 1450 break; 1451 1452 case QCOW2_CLUSTER_ZERO: 1453 continue; 1454 1455 case QCOW2_CLUSTER_NORMAL: 1456 case QCOW2_CLUSTER_COMPRESSED: 1457 break; 1458 1459 default: 1460 abort(); 1461 } 1462 1463 /* First remove L2 entries */ 1464 qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table); 1465 if (s->qcow_version >= 3) { 1466 l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO); 1467 } else { 1468 l2_table[l2_index + i] = cpu_to_be64(0); 1469 } 1470 1471 /* Then decrease the refcount */ 1472 qcow2_free_any_clusters(bs, old_l2_entry, 1, type); 1473 } 1474 1475 ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); 1476 if (ret < 0) { 1477 return ret; 1478 } 1479 1480 return nb_clusters; 1481 } 1482 1483 int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset, 1484 int nb_sectors, enum qcow2_discard_type type) 1485 { 1486 BDRVQcowState *s = bs->opaque; 1487 uint64_t end_offset; 1488 unsigned int nb_clusters; 1489 int ret; 1490 1491 end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS); 1492 1493 /* Round start up and end down */ 1494 offset = align_offset(offset, s->cluster_size); 1495 end_offset = start_of_cluster(s, end_offset); 1496 1497 if (offset > end_offset) { 1498 return 0; 1499 } 1500 1501 nb_clusters = size_to_clusters(s, end_offset - offset); 1502 1503 s->cache_discards = true; 1504 1505 /* Each L2 table is handled by its own loop iteration */ 1506 while (nb_clusters > 0) { 1507 ret = discard_single_l2(bs, offset, nb_clusters, type); 1508 if (ret < 0) { 1509 goto fail; 1510 } 1511 1512 nb_clusters -= ret; 1513 offset += (ret * s->cluster_size); 1514 } 1515 1516 ret = 0; 1517 fail: 1518 s->cache_discards = false; 1519 qcow2_process_discards(bs, ret); 1520 1521 return ret; 1522 } 1523 1524 /* 1525 * This zeroes as many clusters of nb_clusters as possible at once (i.e. 1526 * all clusters in the same L2 table) and returns the number of zeroed 1527 * clusters. 
1528 */ 1529 static int zero_single_l2(BlockDriverState *bs, uint64_t offset, 1530 unsigned int nb_clusters) 1531 { 1532 BDRVQcowState *s = bs->opaque; 1533 uint64_t *l2_table; 1534 int l2_index; 1535 int ret; 1536 int i; 1537 1538 ret = get_cluster_table(bs, offset, &l2_table, &l2_index); 1539 if (ret < 0) { 1540 return ret; 1541 } 1542 1543 /* Limit nb_clusters to one L2 table */ 1544 nb_clusters = MIN(nb_clusters, s->l2_size - l2_index); 1545 1546 for (i = 0; i < nb_clusters; i++) { 1547 uint64_t old_offset; 1548 1549 old_offset = be64_to_cpu(l2_table[l2_index + i]); 1550 1551 /* Update L2 entries */ 1552 qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table); 1553 if (old_offset & QCOW_OFLAG_COMPRESSED) { 1554 l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO); 1555 qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST); 1556 } else { 1557 l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO); 1558 } 1559 } 1560 1561 ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table); 1562 if (ret < 0) { 1563 return ret; 1564 } 1565 1566 return nb_clusters; 1567 } 1568 1569 int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors) 1570 { 1571 BDRVQcowState *s = bs->opaque; 1572 unsigned int nb_clusters; 1573 int ret; 1574 1575 /* The zero flag is only supported by version 3 and newer */ 1576 if (s->qcow_version < 3) { 1577 return -ENOTSUP; 1578 } 1579 1580 /* Each L2 table is handled by its own loop iteration */ 1581 nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS); 1582 1583 s->cache_discards = true; 1584 1585 while (nb_clusters > 0) { 1586 ret = zero_single_l2(bs, offset, nb_clusters); 1587 if (ret < 0) { 1588 goto fail; 1589 } 1590 1591 nb_clusters -= ret; 1592 offset += (ret * s->cluster_size); 1593 } 1594 1595 ret = 0; 1596 fail: 1597 s->cache_discards = false; 1598 qcow2_process_discards(bs, ret); 1599 1600 return ret; 1601 } 1602 1603 /* 1604 * Expands all zero clusters in a specific L1 table (or deallocates them, for 1605 * non-backed non-pre-allocated zero clusters). 1606 * 1607 * expanded_clusters is a bitmap where every bit corresponds to one cluster in 1608 * the image file; a bit gets set if the corresponding cluster has been used for 1609 * zero expansion (i.e., has been filled with zeroes and is referenced from an 1610 * L2 table). nb_clusters contains the total cluster count of the image file, 1611 * i.e., the number of bits in expanded_clusters. 

/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * expanded_clusters is a bitmap where every bit corresponds to one cluster in
 * the image file; a bit gets set if the corresponding cluster has been used for
 * zero expansion (i.e., has been filled with zeroes and is referenced from an
 * L2 table). nb_clusters contains the total cluster count of the image file,
 * i.e., the number of bits in expanded_clusters.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, uint8_t **expanded_clusters,
                                      uint64_t *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_table = NULL;
    int ret;
    int i, j;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_table = qemu_try_blockalign(bs->file, s->cluster_size);
        if (l2_table == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        bool l2_dirty = false;

        if (!l2_offset) {
            /* unallocated */
            continue;
        }

        if (is_active_l1) {
            /* get active L2 tables from cache */
            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                    (void **)&l2_table);
        } else {
            /* load inactive L2 tables from disk */
            ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                    (void *)l2_table, s->cluster_sectors);
        }
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            int64_t offset = l2_entry & L2E_OFFSET_MASK, cluster_index;
            int cluster_type = qcow2_get_cluster_type(l2_entry);
            bool preallocated = offset != 0;

            if (cluster_type == QCOW2_CLUSTER_NORMAL) {
                cluster_index = offset >> s->cluster_bits;
                assert((cluster_index >= 0) && (cluster_index < *nb_clusters));
                if ((*expanded_clusters)[cluster_index / 8] &
                    (1 << (cluster_index % 8))) {
                    /* Probably a shared L2 table; this cluster was a zero
                     * cluster which has been expanded, its refcount
                     * therefore most likely requires an update. */
                    ret = qcow2_update_cluster_refcount(bs, cluster_index, 1,
                                                        QCOW2_DISCARD_NEVER);
                    if (ret < 0) {
                        goto fail;
                    }
                    /* Since we just increased the refcount, the COPIED flag may
                     * no longer be set. */
                    l2_table[j] = cpu_to_be64(l2_entry & ~QCOW_OFLAG_COPIED);
                    l2_dirty = true;
                }
                continue;
            } else if (qcow2_get_cluster_type(l2_entry) != QCOW2_CLUSTER_ZERO) {
                continue;
            }

            if (!preallocated) {
                if (!bs->backing_hd) {
                    /* not backed; therefore we can simply deallocate the
                     * cluster */
                    l2_table[j] = 0;
                    l2_dirty = true;
                    continue;
                }

                offset = qcow2_alloc_clusters(bs, s->cluster_size);
                if (offset < 0) {
                    ret = offset;
                    goto fail;
                }
            }

            ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            ret = bdrv_write_zeroes(bs->file, offset / BDRV_SECTOR_SIZE,
                                    s->cluster_sectors, 0);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
            l2_dirty = true;

            cluster_index = offset >> s->cluster_bits;

            if (cluster_index >= *nb_clusters) {
                uint64_t old_bitmap_size = (*nb_clusters + 7) / 8;
                uint64_t new_bitmap_size;
                /* The offset may lie beyond the old end of the underlying image
                 * file for growable files only */
                assert(bs->file->growable);
                *nb_clusters = size_to_clusters(s, bs->file->total_sectors *
                                                BDRV_SECTOR_SIZE);
                new_bitmap_size = (*nb_clusters + 7) / 8;
                *expanded_clusters = g_realloc(*expanded_clusters,
                                               new_bitmap_size);
                /* clear the newly allocated space */
                memset(&(*expanded_clusters)[old_bitmap_size], 0,
                       new_bitmap_size - old_bitmap_size);
            }

            assert((cluster_index >= 0) && (cluster_index < *nb_clusters));
            (*expanded_clusters)[cluster_index / 8] |= 1 << (cluster_index % 8);
        }

        if (is_active_l1) {
            if (l2_dirty) {
                qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
                qcow2_cache_depends_on_flush(s->l2_table_cache);
            }
            ret = qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
            if (ret < 0) {
                l2_table = NULL;
                goto fail;
            }
        } else {
            if (l2_dirty) {
                ret = qcow2_pre_write_overlap_check(bs,
                        QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
                        s->cluster_size);
                if (ret < 0) {
                    goto fail;
                }

                ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                        (void *)l2_table, s->cluster_sectors);
                if (ret < 0) {
                    goto fail;
                }
            }
        }
    }

    ret = 0;

fail:
    if (l2_table) {
        if (!is_active_l1) {
            qemu_vfree(l2_table);
        } else {
            if (ret < 0) {
                qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
            } else {
                ret = qcow2_cache_put(bs, s->l2_table_cache,
                                      (void **)&l2_table);
            }
        }
    }
    return ret;
}
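
/*
 * Editor's illustration (not part of the original driver): the
 * expanded_clusters bitmap uses one bit per host cluster, so cluster index
 * 1234 is tracked in byte 1234 / 8 == 154, bit 1234 % 8 == 2:
 *
 *     (*expanded_clusters)[154] |= 1 << 2;
 */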

/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table = NULL;
    uint64_t nb_clusters;
    uint8_t *expanded_clusters;
    int ret;
    int i, j;

    nb_clusters = size_to_clusters(s, bs->file->total_sectors *
                                   BDRV_SECTOR_SIZE);
    expanded_clusters = g_try_malloc0((nb_clusters + 7) / 8);
    if (expanded_clusters == NULL) {
        ret = -ENOMEM;
        goto fail;
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &expanded_clusters, &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_sectors = (s->snapshots[i].l1_size * sizeof(uint64_t) +
                BDRV_SECTOR_SIZE - 1) / BDRV_SECTOR_SIZE;

        l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);

        ret = bdrv_read(bs->file, s->snapshots[i].l1_table_offset /
                BDRV_SECTOR_SIZE, (void *)l1_table, l1_sectors);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &expanded_clusters, &nb_clusters);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(expanded_clusters);
    g_free(l1_table);
    return ret;
}