/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "trace.h"

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size) {
        return 0;
    }

    /* Do a sanity check on min_size before trying to calculate new_l1_size
     * (this prevents overflows during the while loop for the calculation of
     * new_l1_size) */
    if (min_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }
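    /*
     * For illustration: starting from 1, the loop above yields the sizes
     * 1, 2, 3, 5, 8, 12, 18, 27, ... (roughly a factor of 1.5 per step),
     * so repeated growth needs only O(log n) reallocations of the table.
     */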
    if (new_l1_size > INT_MAX / sizeof(uint64_t)) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_try_blockalign(bs->file,
                                       align_offset(new_l1_size2, 512));
    if (new_l1_table == NULL) {
        return -ENOMEM;
    }
    memset(new_l1_table, 0, align_offset(new_l1_size2, 512));

    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        qemu_vfree(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    }
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
                           new_l1_table, new_l1_size2);
    if (ret < 0) {
        goto fail;
    }
    for (i = 0; i < s->l1_size; i++) {
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
    }

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    qemu_vfree(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
 fail:
    qemu_vfree(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success and sets *l2_table to point to the L2 table;
 * returns -errno if reading from the image file failed.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
                   uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);

    return ret;
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
         i++)
    {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}
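/*
 * Rough numbers for orientation, assuming the default 64 KiB clusters
 * (cluster_bits = 16): an L2 table holds cluster_size / 8 = 8192 entries,
 * so a single L1 entry covers 8192 * 64 KiB = 512 MiB of guest address
 * space, and the lookups in this file decompose a guest offset as
 *
 *   l1_index = offset >> (s->l2_bits + s->cluster_bits);
 *   l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
 */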
/*
 * l2_allocate
 *
 * Allocates a new L2 table in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table), copy the contents of the old L2 table into the newly allocated
 * one. Otherwise the new table is initialized with zeros.
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table = NULL;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        goto fail;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
                              old_l2_offset & L1E_OFFSET_MASK,
                              (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_table != NULL) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}

/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster which may require a different handling)
 */
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t stop_flags)
{
    int i;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_table[0]);
    uint64_t offset = first_entry & mask;

    if (!offset) {
        return 0;
    }

    assert(qcow2_get_cluster_type(first_entry) != QCOW2_CLUSTER_COMPRESSED);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}
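/*
 * Example of the semantics above (illustrative values only): with 64 KiB
 * clusters, L2 entries pointing at host offsets 0x50000, 0x60000, 0x70000,
 * 0x90000 make count_contiguous_clusters() return 3, because the fourth
 * entry skips a cluster in the image file. A change in any stop_flags bit
 * (e.g. QCOW_OFLAG_ZERO) ends the count in the same way.
 */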
static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != QCOW2_CLUSTER_UNALLOCATED) {
            break;
        }
    }

    return i;
}

/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for (i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}

static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_try_blockalign(bs, iov.iov_len);
    if (iov.iov_base == NULL) {
        return -ENOMEM;
    }

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    if (!bs->drv) {
        ret = -ENOMEDIUM;
        goto out;
    }

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                              iov.iov_base, iov.iov_base, n, 1,
                              &s->aes_encrypt_key);
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + n_start * BDRV_SECTOR_SIZE, n * BDRV_SECTOR_SIZE);
    if (ret < 0) {
        goto out;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}
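/*
 * Note: copy_sectors() works in 512-byte sector units within a single
 * cluster, i.e. at most s->cluster_sectors per call (128 sectors for the
 * default 64 KiB clusters); perform_cow() below drops s->lock around the
 * call because the read may go to the backing file.
 */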
/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in the
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * On exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
                             int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        if (s->qcow_version < 3) {
            qcow2_signal_corruption(bs, true, -1, -1, "Zero cluster entry found"
                                    " in pre-v3 image (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        if (offset_into_cluster(s, *cluster_offset)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset %#"
                                    PRIx64 " unaligned (L2 offset: %#" PRIx64
                                    ", L2 index: %#x)", *cluster_offset,
                                    l2_offset, l2_index);
            ret = -EIO;
            goto fail;
        }
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed) {
        nb_available = nb_needed;
    }

    *num = nb_available - index_in_cluster;

    return ret;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
    return ret;
}
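/*
 * Sketch of a typical call (hypothetical caller, illustrative values):
 *
 *   uint64_t host;
 *   int n = 16;
 *   int type = qcow2_get_cluster_offset(bs, guest_offset, &n, &host);
 *
 * On return, type is one of the QCOW2_CLUSTER_* values (or -errno) and n
 * has been clamped to the contiguous sectors sharing that cluster type.
 */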
/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek to the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (offset_into_cluster(s, l2_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#" PRIx64
                                " unaligned (L1 index: %#" PRIx64 ")",
                                l2_offset, l1_index);
        return -EIO;
    }

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}
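/*
 * For orientation, a compressed cluster descriptor roughly looks like this
 * for the default 64 KiB clusters, where csize_shift =
 * 62 - (cluster_bits - 8) = 54 (see the qcow2 specification for the
 * authoritative layout):
 *
 *   bits 0..53    host offset of the first compressed byte
 *   bits 54..61   number of additional 512-byte sectors used
 *   bit  62       QCOW_OFLAG_COMPRESSED
 */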
/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Returns the cluster offset on success; returns 0 otherwise.
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}

static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    if (r->nb_sectors == 0) {
        return 0;
    }

    qemu_co_mutex_unlock(&s->lock);
    ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
                       r->offset / BDRV_SECTOR_SIZE,
                       r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
    qemu_co_mutex_lock(&s->lock);

    if (ret < 0) {
        return ret;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    qcow2_cache_depends_on_flush(s->l2_table_cache);

    return 0;
}
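/*
 * COW region illustration (example numbers only): a write of guest bytes
 * 0x1000..0x1F000 into a newly allocated two-cluster area (64 KiB
 * clusters) leaves cow_start covering bytes [0, 0x1000) and cow_end
 * covering bytes [0x1F000, 0x20000) of the allocated area; both regions
 * are filled from the old data by the perform_cow() calls in
 * qcow2_alloc_cluster_link_l2() below.
 */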
int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_try_new(uint64_t, m->nb_clusters);
    if (old_cluster == NULL) {
        ret = -ENOMEM;
        goto err;
    }

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m, &m->cow_start);
    if (ret < 0) {
        goto err;
    }

    ret = perform_cow(bs, m, &m->cow_end);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    assert(l2_index + m->nb_clusters <= s->l2_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes data
         * concurrently. The first one to complete updates the L2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by copy_sectors()), update the L2 table with its
         * cluster pointer, and free the old cluster. This is what this loop
         * does. */
        if (l2_table[l2_index + i] != 0) {
            old_cluster[j++] = l2_table[l2_index + i];
        }

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }


    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must copy from the backing file)
 */
static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

        switch (cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}
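/*
 * The overlap test below is plain interval intersection: a request
 * [start, end) is independent of an allocation [old_start, old_end) iff
 * end <= old_start || start >= old_end. For example, a request for
 * 0x10000..0x30000 against an in-flight allocation covering
 * 0x20000..0x40000 intersects and is shortened to 0x10000..0x20000.
 */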
/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}
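/*
 * For illustration, both handle_copied() and handle_alloc() below turn a
 * byte range into a cluster count like this (64 KiB clusters assumed):
 *
 *   guest_offset = 0x11000, *bytes = 0x30000
 *   offset_into_cluster(s, guest_offset)  = 0x1000
 *   size_to_clusters(s, 0x1000 + 0x30000) = 4
 *
 * i.e. the unaligned head pulls one extra cluster into the range.
 */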
/*
 * Checks how many clusters that are already allocated and don't require a
 * copy on write are present at the given guest_offset (up to *bytes). If
 * *host_offset is not zero, only physically contiguous clusters beginning
 * at this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *   -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    unsigned int nb_clusters;
    unsigned int keep_clusters;
    int ret, pret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (offset_into_cluster(s, cluster_offset & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, true, -1, -1, "Data cluster offset "
                                    "%#llx unaligned (guest offset: %#" PRIx64
                                    ")", cluster_offset & L2E_OFFSET_MASK,
                                    guest_offset);
            ret = -EIO;
            goto out;
        }

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    pret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (pret < 0) {
        return pret;
    }

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret > 0) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}
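/*
 * Note: QCOW_OFLAG_COPIED is only set on clusters whose refcount is
 * exactly 1, so the clusters counted by handle_copied() can be written
 * in place without any copy-on-write.
 */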
/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, unsigned int *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}
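/*
 * Usage sketch (hypothetical values): with *host_offset == 0 the allocator
 * may place the clusters anywhere in the image file; with e.g.
 * *host_offset == 0x80000 the call goes through qcow2_alloc_clusters_at()
 * and *nb_clusters may shrink, possibly to 0, if the area at 0x80000 is
 * partially in use.
 */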
/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *   -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    unsigned int nb_clusters;
    int ret;

    uint64_t alloc_cluster_offset;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* Allocate, if necessary at a given offset in the image file */
    alloc_cluster_offset = start_of_cluster(s, *host_offset);
    ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                  &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Can't extend contiguous allocation */
    if (nb_clusters == 0) {
        *bytes = 0;
        return 0;
    }

    /* !*host_offset would overwrite the image header and is reserved for "no
     * host offset preferred". If 0 was a valid host offset, it'd trigger the
     * following overlap check; do that now to avoid having an invalid value in
     * *host_offset. */
    if (!alloc_cluster_offset) {
        ret = qcow2_pre_write_overlap_check(bs, 0, alloc_cluster_offset,
                                            nb_clusters * s->cluster_size);
        assert(ret < 0);
        goto fail;
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the (possibly already
     * shortened) write request.
     *
     * avail_sectors: Number of sectors from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_sectors: The number of sectors from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    int requested_sectors =
        (*bytes + offset_into_cluster(s, guest_offset))
        >> BDRV_SECTOR_BITS;
    int avail_sectors = nb_clusters
                        << (s->cluster_bits - BDRV_SECTOR_BITS);
    int alloc_n_start = offset_into_cluster(s, guest_offset)
                        >> BDRV_SECTOR_BITS;
    int nb_sectors = MIN(requested_sectors, avail_sectors);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,
        .nb_available   = nb_sectors,

        .cow_start = {
            .offset     = 0,
            .nb_sectors = alloc_n_start,
        },
        .cow_end = {
            .offset     = nb_sectors * BDRV_SECTOR_SIZE,
            .nb_sectors = avail_sectors - nb_sectors,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
                         - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}
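/*
 * Worked example for the sector bookkeeping above (illustrative values,
 * 64 KiB clusters): guest_offset = 0x12000, *bytes = 0x4000, one newly
 * allocated cluster:
 *
 *   requested_sectors = (0x4000 + 0x2000) >> 9 = 48
 *   avail_sectors     = 1 << (16 - 9)          = 128
 *   alloc_n_start     = 0x2000 >> 9            = 16
 *   nb_sectors        = MIN(48, 128)           = 48
 *
 * giving cow_start = sectors [0, 16) and cow_end = sectors [48, 128) of
 * the allocated cluster.
 */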
/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in the
 * qcow2 file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *host_offset, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *num);

    assert((offset & ~BDRV_SECTOR_MASK) == 0);

again:
    start = offset;
    remaining = *num << BDRV_SECTOR_BITS;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start           += cur_bytes;
        remaining       -= cur_bytes;
        cluster_offset  += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight
         *         allocation, set cluster_offset to write to the same cluster
         *         and set up the right synchronisation between the in-flight
         *         request and the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
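        /*
         * For example (hypothetical request): a 1 MiB write whose first
         * 256 KiB are already COPIED takes at least two iterations:
         * handle_copied() shortens cur_bytes to 256 KiB and continues the
         * loop, and only the second pass reaches handle_alloc() for the
         * remaining clusters.
         */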
1340 */ 1341 ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m); 1342 if (ret < 0) { 1343 return ret; 1344 } else if (ret) { 1345 continue; 1346 } else { 1347 assert(cur_bytes == 0); 1348 break; 1349 } 1350 } 1351 1352 *num -= remaining >> BDRV_SECTOR_BITS; 1353 assert(*num > 0); 1354 assert(*host_offset != 0); 1355 1356 return 0; 1357 } 1358 1359 static int decompress_buffer(uint8_t *out_buf, int out_buf_size, 1360 const uint8_t *buf, int buf_size) 1361 { 1362 z_stream strm1, *strm = &strm1; 1363 int ret, out_len; 1364 1365 memset(strm, 0, sizeof(*strm)); 1366 1367 strm->next_in = (uint8_t *)buf; 1368 strm->avail_in = buf_size; 1369 strm->next_out = out_buf; 1370 strm->avail_out = out_buf_size; 1371 1372 ret = inflateInit2(strm, -12); 1373 if (ret != Z_OK) 1374 return -1; 1375 ret = inflate(strm, Z_FINISH); 1376 out_len = strm->next_out - out_buf; 1377 if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) || 1378 out_len != out_buf_size) { 1379 inflateEnd(strm); 1380 return -1; 1381 } 1382 inflateEnd(strm); 1383 return 0; 1384 } 1385 1386 int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset) 1387 { 1388 BDRVQcowState *s = bs->opaque; 1389 int ret, csize, nb_csectors, sector_offset; 1390 uint64_t coffset; 1391 1392 coffset = cluster_offset & s->cluster_offset_mask; 1393 if (s->cluster_cache_offset != coffset) { 1394 nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1; 1395 sector_offset = coffset & 511; 1396 csize = nb_csectors * 512 - sector_offset; 1397 BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED); 1398 ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors); 1399 if (ret < 0) { 1400 return ret; 1401 } 1402 if (decompress_buffer(s->cluster_cache, s->cluster_size, 1403 s->cluster_data + sector_offset, csize) < 0) { 1404 return -EIO; 1405 } 1406 s->cluster_cache_offset = coffset; 1407 } 1408 return 0; 1409 } 1410 1411 /* 1412 * This discards as many clusters of nb_clusters as possible at once (i.e. 1413 * all clusters in the same L2 table) and returns the number of discarded 1414 * clusters. 1415 */ 1416 static int discard_single_l2(BlockDriverState *bs, uint64_t offset, 1417 unsigned int nb_clusters, enum qcow2_discard_type type, bool full_discard) 1418 { 1419 BDRVQcowState *s = bs->opaque; 1420 uint64_t *l2_table; 1421 int l2_index; 1422 int ret; 1423 int i; 1424 1425 ret = get_cluster_table(bs, offset, &l2_table, &l2_index); 1426 if (ret < 0) { 1427 return ret; 1428 } 1429 1430 /* Limit nb_clusters to one L2 table */ 1431 nb_clusters = MIN(nb_clusters, s->l2_size - l2_index); 1432 1433 for (i = 0; i < nb_clusters; i++) { 1434 uint64_t old_l2_entry; 1435 1436 old_l2_entry = be64_to_cpu(l2_table[l2_index + i]); 1437 1438 /* 1439 * If full_discard is false, make sure that a discarded area reads back 1440 * as zeroes for v3 images (we cannot do it for v2 without actually 1441 * writing a zero-filled buffer). We can skip the operation if the 1442 * cluster is already marked as zero, or if it's unallocated and we 1443 * don't have a backing file. 1444 * 1445 * TODO We might want to use bdrv_get_block_status(bs) here, but we're 1446 * holding s->lock, so that doesn't work today. 1447 * 1448 * If full_discard is true, the sector should not read back as zeroes, 1449 * but rather fall through to the backing file. 
int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}

/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters, enum qcow2_discard_type type, bool full_discard)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_l2_entry;

        old_l2_entry = be64_to_cpu(l2_table[l2_index + i]);

        /*
         * If full_discard is false, make sure that a discarded area reads back
         * as zeroes for v3 images (we cannot do it for v2 without actually
         * writing a zero-filled buffer). We can skip the operation if the
         * cluster is already marked as zero, or if it's unallocated and we
         * don't have a backing file.
         *
         * TODO We might want to use bdrv_get_block_status(bs) here, but we're
         * holding s->lock, so that doesn't work today.
         *
         * If full_discard is true, the sector should not read back as zeroes,
         * but rather fall through to the backing file.
         */
        switch (qcow2_get_cluster_type(old_l2_entry)) {
        case QCOW2_CLUSTER_UNALLOCATED:
            if (full_discard || !bs->backing_hd) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_ZERO:
            if (!full_discard) {
                continue;
            }
            break;

        case QCOW2_CLUSTER_NORMAL:
        case QCOW2_CLUSTER_COMPRESSED:
            break;

        default:
            abort();
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        if (!full_discard && s->qcow_version >= 3) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
        } else {
            l2_table[l2_index + i] = cpu_to_be64(0);
        }

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_l2_entry, 1, type);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors, enum qcow2_discard_type type, bool full_discard)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset = start_of_cluster(s, end_offset);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    s->cache_discards = true;

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters, type, full_discard);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
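/*
 * The rounding above is conservative; as an example, a discard of bytes
 * 0x12000..0x31000 with 64 KiB clusters is clipped to 0x20000..0x30000,
 * i.e. exactly one whole cluster, and the partial head and tail clusters
 * are left untouched.
 */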
/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /* Update L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        if (old_offset & QCOW_OFLAG_COMPRESSED) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int nb_clusters;
    int ret;

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        ret = zero_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
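/*
 * Note: zero_single_l2() keeps a normal cluster's allocation and merely
 * sets QCOW_OFLAG_ZERO on its L2 entry, while a compressed cluster is
 * replaced by a plain zero entry and freed; either way the guest reads
 * back zeroes afterwards.
 */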
/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * l1_entries and *visited_l1_entries are used to keep track of progress for
 * status_cb(). l1_entries contains the total number of L1 entries and
 * *visited_l1_entries counts all visited L1 entries.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, int64_t *visited_l1_entries,
                                      int64_t l1_entries,
                                      BlockDriverAmendStatusCB *status_cb)
{
    BDRVQcowState *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_table = NULL;
    int ret;
    int i, j;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_table = qemu_try_blockalign(bs->file, s->cluster_size);
        if (l2_table == NULL) {
            return -ENOMEM;
        }
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        bool l2_dirty = false;
        int l2_refcount;

        if (!l2_offset) {
            /* unallocated */
            (*visited_l1_entries)++;
            if (status_cb) {
                status_cb(bs, *visited_l1_entries, l1_entries);
            }
            continue;
        }

        if (is_active_l1) {
            /* get active L2 tables from cache */
            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                    (void **)&l2_table);
        } else {
            /* load inactive L2 tables from disk */
            ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                    (void *)l2_table, s->cluster_sectors);
        }
        if (ret < 0) {
            goto fail;
        }

        l2_refcount = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits);
        if (l2_refcount < 0) {
            ret = l2_refcount;
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            int64_t offset = l2_entry & L2E_OFFSET_MASK;
            int cluster_type = qcow2_get_cluster_type(l2_entry);
            bool preallocated = offset != 0;

            if (cluster_type != QCOW2_CLUSTER_ZERO) {
                continue;
            }

            if (!preallocated) {
                if (!bs->backing_hd) {
                    /* not backed; therefore we can simply deallocate the
                     * cluster */
                    l2_table[j] = 0;
                    l2_dirty = true;
                    continue;
                }

                offset = qcow2_alloc_clusters(bs, s->cluster_size);
                if (offset < 0) {
                    ret = offset;
                    goto fail;
                }

                if (l2_refcount > 1) {
                    /* For shared L2 tables, set the refcount accordingly (it is
                     * already 1 and needs to be l2_refcount) */
                    ret = qcow2_update_cluster_refcount(bs,
                            offset >> s->cluster_bits, l2_refcount - 1,
                            QCOW2_DISCARD_OTHER);
                    if (ret < 0) {
                        qcow2_free_clusters(bs, offset, s->cluster_size,
                                            QCOW2_DISCARD_OTHER);
                        goto fail;
                    }
                }
            }

            ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            ret = bdrv_write_zeroes(bs->file, offset / BDRV_SECTOR_SIZE,
                                    s->cluster_sectors, 0);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            if (l2_refcount == 1) {
                l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
            } else {
                l2_table[j] = cpu_to_be64(offset);
            }
            l2_dirty = true;
        }

        if (is_active_l1) {
            if (l2_dirty) {
                qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
                qcow2_cache_depends_on_flush(s->l2_table_cache);
            }
            ret = qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
            if (ret < 0) {
                l2_table = NULL;
                goto fail;
            }
        } else {
            if (l2_dirty) {
                ret = qcow2_pre_write_overlap_check(bs,
                        QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
                        s->cluster_size);
                if (ret < 0) {
                    goto fail;
                }

                ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                        (void *)l2_table, s->cluster_sectors);
                if (ret < 0) {
                    goto fail;
                }
            }
        }

        (*visited_l1_entries)++;
        if (status_cb) {
            status_cb(bs, *visited_l1_entries, l1_entries);
        }
    }

    ret = 0;

fail:
    if (l2_table) {
        if (!is_active_l1) {
            qemu_vfree(l2_table);
        } else {
            if (ret < 0) {
                qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
            } else {
                ret = qcow2_cache_put(bs, s->l2_table_cache,
                        (void **)&l2_table);
            }
        }
    }
    return ret;
}

/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs,
                               BlockDriverAmendStatusCB *status_cb)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table = NULL;
    int64_t l1_entries = 0, visited_l1_entries = 0;
    int ret;
    int i, j;

    if (status_cb) {
        l1_entries = s->l1_size;
        for (i = 0; i < s->nb_snapshots; i++) {
            l1_entries += s->snapshots[i].l1_size;
        }
    }

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &visited_l1_entries, l1_entries,
                                     status_cb);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_sectors = (s->snapshots[i].l1_size * sizeof(uint64_t) +
                BDRV_SECTOR_SIZE - 1) / BDRV_SECTOR_SIZE;

        l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);

        ret = bdrv_read(bs->file, s->snapshots[i].l1_table_offset /
                BDRV_SECTOR_SIZE, (void *)l1_table, l1_sectors);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &visited_l1_entries, l1_entries,
                                         status_cb);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(l1_table);
    return ret;
}