/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block/block_int.h"
#include "block/qcow2.h"
#include "trace.h"

int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
                        bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t old_l1_table_offset, old_l1_size;
    int64_t new_l1_table_offset, new_l1_size;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

    if (new_l1_size > INT_MAX) {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %" PRId64 "\n",
            s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = g_malloc0(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        g_free(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* the L1 position has not yet been updated, so these clusters must
     * indeed be completely free */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_l1_table_offset,
                                        new_l1_size2);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    stq_be_p(data + 4, new_l1_table_offset);
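    /* The QCowHeader stores the big-endian l1_size (4 bytes) directly
     * followed by the big-endian l1_table_offset (8 bytes), which is why a
     * single 12-byte write starting at offsetof(QCowHeader, l1_size) updates
     * both fields at once. */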
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
                           data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    g_free(s->l1_table);
    old_l1_table_offset = s->l1_table_offset;
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    old_l1_size = s->l1_size;
    s->l1_size = new_l1_size;
    qcow2_free_clusters(bs, old_l1_table_offset, old_l1_size * sizeof(uint64_t),
                        QCOW2_DISCARD_OTHER);
    return 0;
fail:
    g_free(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
                        QCOW2_DISCARD_OTHER);
    return ret;
}

/*
 * l2_load
 *
 * Loads a L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success, -errno if the L2 table couldn't be loaded from the
 * image file. On success, *l2_table points to the loaded table.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
                   uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);

    return ret;
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pwrite to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
            s->l1_table_offset + 8 * l1_start_index, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
                           buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}
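
/*
 * For reference, the L1 entry layout manipulated above and below (see the
 * qcow2 specification):
 *
 *   bit  63:      QCOW_OFLAG_COPIED (the L2 table's refcount is exactly 1)
 *   bits 9-55:    host offset of the L2 table (L1E_OFFSET_MASK)
 *
 * An all-zero entry means that no L2 table has been allocated yet.
 */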

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table), copy the contents of the old L2 table into the newly allocated
 * one. Otherwise the new table is initialized with zeros.
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table = NULL;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        ret = l2_offset;
        goto fail;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        goto fail;
    }

    l2_table = *table;

    if ((old_l2_offset & L1E_OFFSET_MASK) == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache,
            old_l2_offset & L1E_OFFSET_MASK,
            (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = qcow2_write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    if (l2_table != NULL) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    }
    s->l1_table[l1_index] = old_l2_offset;
    if (l2_offset > 0) {
        qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                            QCOW2_DISCARD_ALWAYS);
    }
    return ret;
}
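
/*
 * For reference, the standard L2 entry layout decoded by the helpers below
 * (see the qcow2 specification):
 *
 *   bit  63:      QCOW_OFLAG_COPIED (the cluster's refcount is exactly 1)
 *   bit  62:      QCOW_OFLAG_COMPRESSED
 *   bits 9-55:    host cluster offset (L2E_OFFSET_MASK)
 *   bit  0:       QCOW_OFLAG_ZERO (version 3 and newer)
 *
 * Compressed clusters replace the plain host offset by a packed offset/size
 * descriptor.
 */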

/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster, which may require different handling.)
 */
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t stop_flags)
{
    int i;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK | QCOW_OFLAG_COMPRESSED;
    uint64_t first_entry = be64_to_cpu(l2_table[0]);
    uint64_t offset = first_entry & mask;

    if (!offset)
        return 0;

    assert(qcow2_get_cluster_type(first_entry) != QCOW2_CLUSTER_COMPRESSED);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return i;
}

static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != QCOW2_CLUSTER_UNALLOCATED) {
            break;
        }
    }

    return i;
}
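
/*
 * Example for count_contiguous_clusters(), assuming 64 KiB clusters: L2
 * entries pointing to host offsets 0x50000, 0x60000, 0x70000, 0x90000 yield
 * a count of 3, because the jump from 0x70000 to 0x90000 breaks the
 * contiguous run.
 */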

/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for(i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}

static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    /*
     * If this is the last cluster and it is only partially used, we must only
     * copy until the end of the image, or bdrv_check_request will fail for the
     * bdrv_read/write calls below.
     */
    if (start_sect + n_end > bs->total_sectors) {
        n_end = bs->total_sectors - start_sect;
    }

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_blockalign(bs, iov.iov_len);

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                              iov.iov_base, iov.iov_base, n, 1,
                              &s->aes_encrypt_key);
    }

    ret = qcow2_pre_write_overlap_check(bs, 0,
            cluster_offset + n_start * BDRV_SECTOR_SIZE, n * BDRV_SECTOR_SIZE);
    if (ret < 0) {
        goto out;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}
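
/*
 * copy_sectors() above is the workhorse of the copy-on-write path: allocating
 * writes use it (via perform_cow() further below) to fill the head and tail
 * of a newly allocated cluster with the previously visible data. All sector
 * counts in this file refer to 512-byte sectors (BDRV_SECTOR_SIZE).
 */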

/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * on entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_ZERO:
        if (s->qcow_version < 3) {
            /* don't leak the cache reference on this error path */
            qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
            return -EIO;
        }
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], QCOW_OFLAG_ZERO);
        *cluster_offset &= L2E_OFFSET_MASK;
        break;
    default:
        abort();
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    *num = nb_available - index_in_cluster;

    return ret;
}
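
/*
 * Index arithmetic as used above, for the default 64 KiB clusters
 * (cluster_bits = 16, l2_size = 8192 entries, hence l2_bits = 13 and
 * l1_bits = 29): guest offset 0x23456789 splits into
 * l1_index = offset >> 29 = 1, l2_index = (offset >> 16) & 8191 = 837,
 * and byte 0x6789 within the cluster.
 */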

/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 *
 * Returns 0 on success, -errno in failure case
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l2_index;
    uint64_t l1_index, l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }

    assert(l1_index < s->l1_size);
    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;

    /* seek the l2 table of the given l2 offset */

    if (s->l1_table[l1_index] & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
                                QCOW2_DISCARD_OTHER);
        }
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}
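
/*
 * Compressed cluster descriptors pack two fields into one L2 entry: the host
 * offset of the compressed data and the number of additional 512-byte
 * sectors it spans. Where the sector count sits in the entry depends on
 * cluster_bits; s->csize_shift and s->csize_mask (set up when the image is
 * opened) describe it, see their use below and in qcow2_decompress_cluster().
 */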

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return cluster offset in
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Returns the cluster offset on success and 0 otherwise.
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    /* Compression can't overwrite anything. Fail if the cluster was already
     * allocated. */
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & L2E_OFFSET_MASK) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}
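
/*
 * An allocating write carries up to two COW regions in its QCowL2Meta:
 * cow_start covers the bytes between the start of the first newly allocated
 * cluster and the start of the write, cow_end those between the end of the
 * write and the end of the last newly allocated cluster. perform_cow() copies
 * one such region from the previously visible data.
 */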

static int perform_cow(BlockDriverState *bs, QCowL2Meta *m, Qcow2COWRegion *r)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    if (r->nb_sectors == 0) {
        return 0;
    }

    qemu_co_mutex_unlock(&s->lock);
    ret = copy_sectors(bs, m->offset / BDRV_SECTOR_SIZE, m->alloc_offset,
                       r->offset / BDRV_SECTOR_SIZE,
                       r->offset / BDRV_SECTOR_SIZE + r->nb_sectors);
    qemu_co_mutex_lock(&s->lock);

    if (ret < 0) {
        return ret;
    }

    /*
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    qcow2_cache_depends_on_flush(s->l2_table_cache);

    return 0;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);
    assert(m->nb_clusters > 0);

    old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    ret = perform_cow(bs, m, &m->cow_start);
    if (ret < 0) {
        goto err;
    }

    ret = perform_cow(bs, m, &m->cow_end);
    if (ret < 0) {
        goto err;
    }

    /* Update L2 table. */
    if (s->use_lazy_refcounts) {
        qcow2_mark_dirty(bs);
    }
    if (qcow2_need_accurate_refcounts(s)) {
        qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                   s->refcount_block_cache);
    }

    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    assert(l2_index + m->nb_clusters <= s->l2_size);
    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes the data
         * concurrently. The first one to complete updates the L2 table with a
         * pointer to its cluster; the second one has to do the RMW (which is
         * done above by copy_sectors()), update the L2 table with its own
         * cluster pointer, and free the old cluster. This is what this loop
         * does. */
        if(l2_table[l2_index + i] != 0)
            old_cluster[j++] = l2_table[l2_index + i];

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     *
     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
     * clusters), the next write will reuse them anyway.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
                                    QCOW2_DISCARD_NEVER);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}
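
/*
 * A note on ordering in qcow2_alloc_cluster_link_l2() above: making the L2
 * table cache depend on the refcount block cache ensures that the refcount
 * update hits the disk no later than the L2 update that relies on it. With
 * lazy refcounts enabled this ordering is skipped and the image is merely
 * marked dirty, to be repaired after a crash.
 */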

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must be copied from the backing file)
 */
static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[l2_index + i]);
        int cluster_type = qcow2_get_cluster_type(l2_entry);

        switch(cluster_type) {
        case QCOW2_CLUSTER_NORMAL:
            if (l2_entry & QCOW_OFLAG_COPIED) {
                goto out;
            }
            break;
        case QCOW2_CLUSTER_UNALLOCATED:
        case QCOW2_CLUSTER_COMPRESSED:
        case QCOW2_CLUSTER_ZERO:
            break;
        default:
            abort();
        }
    }

out:
    assert(i <= nb_clusters);
    return i;
}

/*
 * Check if there already is an AIO write request in flight which allocates
 * the same cluster. In this case we need to wait until the previous
 * request has completed and updated the L2 table accordingly.
 *
 * Returns:
 *   0       if there was no dependency. *cur_bytes indicates the number of
 *           bytes from guest_offset that can be read before the next
 *           dependency must be processed (or the request is complete)
 *
 *   -EAGAIN if we had to wait for another request, previously gathered
 *           information on cluster allocation may be invalid now. The caller
 *           must start over anyway, so consider *cur_bytes undefined.
 */
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *cur_bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    QCowL2Meta *old_alloc;
    uint64_t bytes = *cur_bytes;

    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset;
        uint64_t end = start + bytes;
        uint64_t old_start = l2meta_cow_start(old_alloc);
        uint64_t old_end = l2meta_cow_end(old_alloc);

        if (end <= old_start || start >= old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                bytes = old_start - start;
            } else {
                bytes = 0;
            }

            /* Stop if already an l2meta exists. After yielding, it wouldn't
             * be valid any more, so we'd have to clean up the old L2Metas
             * and deal with requests depending on them before starting to
             * gather new ones. Not worth the trouble. */
            if (bytes == 0 && *m) {
                *cur_bytes = 0;
                return 0;
            }

            if (bytes == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    /* Make sure that existing clusters and new allocations are only used up to
     * the next dependency if we shortened the request above */
    *cur_bytes = bytes;

    return 0;
}
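
/*
 * Example for handle_dependencies(): if an in-flight allocation covers the
 * guest range [0x20000, 0x30000) and a new request asks for 0x20000 bytes
 * starting at 0x10000, the new request is shortened to 0x10000 bytes so that
 * it ends exactly where the running allocation begins.
 */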

/*
 * Checks how many clusters that are already allocated and don't require a
 * copy on write are available at the given guest_offset (up to *bytes).
 * If *host_offset is not zero, only physically contiguous clusters beginning
 * at this host offset are counted.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no allocated clusters are available at the given offset.
 *          *bytes is normally unchanged. It is set to 0 if the cluster
 *          is allocated and doesn't need COW, but doesn't have the right
 *          physical offset.
 *
 *   1:     if allocated clusters that don't require a COW are available at
 *          the requested offset. *bytes may have decreased and describes
 *          the length of the area that can be written to.
 *
 *   -errno: in error cases
 */
static int handle_copied(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t cluster_offset;
    uint64_t *l2_table;
    unsigned int nb_clusters;
    unsigned int keep_clusters;
    int ret, pret;

    trace_qcow2_handle_copied(qemu_coroutine_self(), guest_offset, *host_offset,
                              *bytes);

    assert(*host_offset == 0 || offset_into_cluster(s, guest_offset)
                                == offset_into_cluster(s, *host_offset));

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* Check how many clusters are already allocated and don't need COW */
    if (qcow2_get_cluster_type(cluster_offset) == QCOW2_CLUSTER_NORMAL
        && (cluster_offset & QCOW_OFLAG_COPIED))
    {
        /* If a specific host_offset is required, check it */
        bool offset_matches =
            (cluster_offset & L2E_OFFSET_MASK) == *host_offset;

        if (*host_offset != 0 && !offset_matches) {
            *bytes = 0;
            ret = 0;
            goto out;
        }

        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters =
            count_contiguous_clusters(nb_clusters, s->cluster_size,
                                      &l2_table[l2_index],
                                      QCOW_OFLAG_COPIED | QCOW_OFLAG_ZERO);
        assert(keep_clusters <= nb_clusters);

        *bytes = MIN(*bytes,
                 keep_clusters * s->cluster_size
                 - offset_into_cluster(s, guest_offset));

        ret = 1;
    } else {
        ret = 0;
    }

    /* Cleanup */
out:
    pret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (pret < 0) {
        return pret;
    }

    /* Only return a host offset if we actually made progress. Otherwise we
     * would make requirements for handle_alloc() that it can't fulfill */
    if (ret) {
        *host_offset = (cluster_offset & L2E_OFFSET_MASK)
                     + offset_into_cluster(s, guest_offset);
    }

    return ret;
}
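
/*
 * When handle_copied() returns 0 with *bytes set to 0, the first cluster is
 * allocated but not at the required host offset; qcow2_alloc_cluster_offset()
 * then stops gathering clusters and shortens the request, leaving the
 * remainder to a follow-up request from the write path.
 */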

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, unsigned int *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        int64_t cluster_offset =
            qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
        if (cluster_offset < 0) {
            return cluster_offset;
        }
        *host_offset = cluster_offset;
        return 0;
    } else {
        int ret = qcow2_alloc_clusters_at(bs, *host_offset, *nb_clusters);
        if (ret < 0) {
            return ret;
        }
        *nb_clusters = ret;
        return 0;
    }
}
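
/*
 * Note that in the fixed-offset mode above, qcow2_alloc_clusters_at() may
 * allocate fewer clusters than requested when only part of the range starting
 * at *host_offset is free; the updated *nb_clusters reflects what was
 * actually allocated (possibly 0).
 */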

/*
 * Allocates new clusters for an area that either is yet unallocated or needs a
 * copy on write. If *host_offset is non-zero, clusters are only allocated if
 * the new allocation can match the specified host offset.
 *
 * Note that guest_offset may not be cluster aligned. In this case, the
 * returned *host_offset points to the exact byte referenced by guest_offset
 * and therefore isn't cluster aligned either.
 *
 * Returns:
 *   0:     if no clusters could be allocated. *bytes is set to 0,
 *          *host_offset is left unchanged.
 *
 *   1:     if new clusters were allocated. *bytes may be decreased if the
 *          new allocation doesn't cover all of the requested area.
 *          *host_offset is updated to contain the host offset of the first
 *          newly allocated cluster.
 *
 *   -errno: in error cases
 */
static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index;
    uint64_t *l2_table;
    uint64_t entry;
    unsigned int nb_clusters;
    int ret;

    uint64_t alloc_cluster_offset;

    trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
                             *bytes);
    assert(*bytes > 0);

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
    nb_clusters =
        size_to_clusters(s, offset_into_cluster(s, guest_offset) + *bytes);

    l2_index = offset_to_l2_index(s, guest_offset);
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, guest_offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    entry = be64_to_cpu(l2_table[l2_index]);

    /* For the moment, overwrite compressed clusters one by one */
    if (entry & QCOW_OFLAG_COMPRESSED) {
        nb_clusters = 1;
    } else {
        nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
    }

    /* This function is only called when there were no non-COW clusters, so if
     * we can't find any unallocated or COW clusters either, something is
     * wrong with our code. */
    assert(nb_clusters > 0);

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* Allocate, if necessary at a given offset in the image file */
    alloc_cluster_offset = start_of_cluster(s, *host_offset);
    ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                  &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Can't extend contiguous allocation */
    if (nb_clusters == 0) {
        *bytes = 0;
        return 0;
    }

    /*
     * Save info needed for meta data update.
     *
     * requested_sectors: Number of sectors from the start of the first
     * newly allocated cluster to the end of the (possibly shortened
     * before) write request.
     *
     * avail_sectors: Number of sectors from the start of the first
     * newly allocated to the end of the last newly allocated cluster.
     *
     * nb_sectors: The number of sectors from the start of the first
     * newly allocated cluster to the end of the area that the write
     * request actually writes to (excluding COW at the end)
     */
    int requested_sectors =
        (*bytes + offset_into_cluster(s, guest_offset))
        >> BDRV_SECTOR_BITS;
    int avail_sectors = nb_clusters
                        << (s->cluster_bits - BDRV_SECTOR_BITS);
    int alloc_n_start = offset_into_cluster(s, guest_offset)
                        >> BDRV_SECTOR_BITS;
    int nb_sectors = MIN(requested_sectors, avail_sectors);
    QCowL2Meta *old_m = *m;

    *m = g_malloc0(sizeof(**m));

    **m = (QCowL2Meta) {
        .next           = old_m,

        .alloc_offset   = alloc_cluster_offset,
        .offset         = start_of_cluster(s, guest_offset),
        .nb_clusters    = nb_clusters,
        .nb_available   = nb_sectors,

        .cow_start = {
            .offset     = 0,
            .nb_sectors = alloc_n_start,
        },
        .cow_end = {
            .offset     = nb_sectors * BDRV_SECTOR_SIZE,
            .nb_sectors = avail_sectors - nb_sectors,
        },
    };
    qemu_co_queue_init(&(*m)->dependent_requests);
    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);

    *host_offset = alloc_cluster_offset + offset_into_cluster(s, guest_offset);
    *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
                         - offset_into_cluster(s, guest_offset));
    assert(*bytes != 0);

    return 1;

fail:
    if (*m && (*m)->nb_clusters > 0) {
        QLIST_REMOVE(*m, next_in_flight);
    }
    return ret;
}
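
/*
 * Worked example for the bookkeeping in handle_alloc(), assuming 64 KiB
 * clusters: a write of 0x11000 bytes at guest offset 0x2800 allocates two
 * clusters. requested_sectors = 0x13800 / 512 = 156, avail_sectors =
 * 2 * 128 = 256, alloc_n_start = 0x2800 / 512 = 20 and nb_sectors = 156,
 * so the COW regions are sectors [0, 20) at the head and [156, 256) at the
 * tail of the allocated area.
 */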

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in qcow2
 * file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *host_offset, QCowL2Meta **m)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t start, remaining;
    uint64_t cluster_offset;
    uint64_t cur_bytes;
    int ret;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset, *num);

    assert((offset & ~BDRV_SECTOR_MASK) == 0);

again:
    start = offset;
    remaining = *num << BDRV_SECTOR_BITS;
    cluster_offset = 0;
    *host_offset = 0;
    cur_bytes = 0;
    *m = NULL;

    while (true) {

        if (!*host_offset) {
            *host_offset = start_of_cluster(s, cluster_offset);
        }

        assert(remaining >= cur_bytes);

        start          += cur_bytes;
        remaining      -= cur_bytes;
        cluster_offset += cur_bytes;

        if (remaining == 0) {
            break;
        }

        cur_bytes = remaining;

        /*
         * Now start gathering as many contiguous clusters as possible:
         *
         * 1. Check for overlaps with in-flight allocations
         *
         *      a) Overlap not in the first cluster -> shorten this request and
         *         let the caller handle the rest in its next loop iteration.
         *
         *      b) Real overlaps of two requests. Yield and restart the search
         *         for contiguous clusters (the situation could have changed
         *         while we were sleeping)
         *
         *      c) TODO: Request starts in the same cluster as the in-flight
         *         allocation ends. Shorten the COW of the in-flight
         *         allocation, set cluster_offset to write to the same cluster
         *         and set up the right synchronisation between the in-flight
         *         request and the new one.
         */
        ret = handle_dependencies(bs, start, &cur_bytes, m);
        if (ret == -EAGAIN) {
            /* Currently handle_dependencies() doesn't yield if we already had
             * an allocation. If it did, we would have to clean up the L2Meta
             * structs before starting over. */
            assert(*m == NULL);
            goto again;
        } else if (ret < 0) {
            return ret;
        } else if (cur_bytes == 0) {
            break;
        } else {
            /* handle_dependencies() may have decreased cur_bytes (shortened
             * the allocations below) so that the next dependency is processed
             * correctly during the next loop iteration. */
        }

        /*
         * 2. Count contiguous COPIED clusters.
         */
        ret = handle_copied(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else if (cur_bytes == 0) {
            break;
        }

        /*
         * 3. If the request still hasn't completed, allocate new clusters,
         *    considering any cluster_offset of steps 1c or 2.
         */
        ret = handle_alloc(bs, start, &cluster_offset, &cur_bytes, m);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            continue;
        } else {
            assert(cur_bytes == 0);
            break;
        }
    }

    *num -= remaining >> BDRV_SECTOR_BITS;
    assert(*num > 0);
    assert(*host_offset != 0);

    return 0;
}
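
/*
 * Compressed qcow2 clusters are raw deflate streams: the negative windowBits
 * value passed to inflateInit2() below tells zlib to expect no zlib header
 * or trailer.
 */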

static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}
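
/*
 * qcow2_decompress_cluster() keeps exactly one decompressed cluster cached
 * (s->cluster_cache, keyed by s->cluster_cache_offset), so consecutive reads
 * from the same compressed cluster pay for a single inflate only.
 */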

/*
 * This discards as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters, enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);
        if ((old_offset & L2E_OFFSET_MASK) == 0) {
            continue;
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        l2_table[l2_index + i] = cpu_to_be64(0);

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_offset, 1, type);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors, enum qcow2_discard_type type)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset = start_of_cluster(s, end_offset);

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    s->cache_discards = true;

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters, type);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
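
/*
 * Note the conservative rounding in qcow2_discard_clusters() above: the start
 * is rounded up and the end rounded down to cluster boundaries, so partial
 * clusters at either end of the range are left untouched.
 */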

/*
 * This zeroes as many clusters of nb_clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of zeroed
 * clusters.
 */
static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);

        /* Update L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        if (old_offset & QCOW_OFLAG_COMPRESSED) {
            l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
        } else {
            l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
        }
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

int qcow2_zero_clusters(BlockDriverState *bs, uint64_t offset, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int nb_clusters;
    int ret;

    /* The zero flag is only supported by version 3 and newer */
    if (s->qcow_version < 3) {
        return -ENOTSUP;
    }

    /* Each L2 table is handled by its own loop iteration */
    nb_clusters = size_to_clusters(s, nb_sectors << BDRV_SECTOR_BITS);

    s->cache_discards = true;

    while (nb_clusters > 0) {
        ret = zero_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    ret = 0;
fail:
    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    return ret;
}
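
/*
 * Note that zero_single_l2() above only changes metadata: for normal clusters
 * the host cluster stays allocated and merely gains QCOW_OFLAG_ZERO, while
 * compressed clusters are freed and replaced by a pure zero entry.
 */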

/*
 * Expands all zero clusters in a specific L1 table (or deallocates them, for
 * non-backed non-pre-allocated zero clusters).
 *
 * expanded_clusters is a bitmap where every bit corresponds to one cluster in
 * the image file; a bit gets set if the corresponding cluster has been used for
 * zero expansion (i.e., has been filled with zeroes and is referenced from an
 * L2 table). nb_clusters contains the total cluster count of the image file,
 * i.e., the number of bits in expanded_clusters.
 */
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                                      int l1_size, uint8_t **expanded_clusters,
                                      uint64_t *nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    bool is_active_l1 = (l1_table == s->l1_table);
    uint64_t *l2_table = NULL;
    int ret;
    int i, j;

    if (!is_active_l1) {
        /* inactive L2 tables require a buffer to be stored in when loading
         * them from disk */
        l2_table = qemu_blockalign(bs, s->cluster_size);
    }

    for (i = 0; i < l1_size; i++) {
        uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
        bool l2_dirty = false;

        if (!l2_offset) {
            /* unallocated */
            continue;
        }

        if (is_active_l1) {
            /* get active L2 tables from cache */
            ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
                    (void **)&l2_table);
        } else {
            /* load inactive L2 tables from disk */
            ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                    (void *)l2_table, s->cluster_sectors);
        }
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = be64_to_cpu(l2_table[j]);
            int64_t offset = l2_entry & L2E_OFFSET_MASK, cluster_index;
            int cluster_type = qcow2_get_cluster_type(l2_entry);
            bool preallocated = offset != 0;

            if (cluster_type == QCOW2_CLUSTER_NORMAL) {
                cluster_index = offset >> s->cluster_bits;
                assert((cluster_index >= 0) && (cluster_index < *nb_clusters));
                if ((*expanded_clusters)[cluster_index / 8] &
                    (1 << (cluster_index % 8))) {
                    /* Probably a shared L2 table; this cluster was a zero
                     * cluster which has been expanded, its refcount
                     * therefore most likely requires an update. */
                    ret = qcow2_update_cluster_refcount(bs, cluster_index, 1,
                                                        QCOW2_DISCARD_NEVER);
                    if (ret < 0) {
                        goto fail;
                    }
                    /* Since we just increased the refcount, the COPIED flag may
                     * no longer be set. */
                    l2_table[j] = cpu_to_be64(l2_entry & ~QCOW_OFLAG_COPIED);
                    l2_dirty = true;
                }
                continue;
            }
            else if (qcow2_get_cluster_type(l2_entry) != QCOW2_CLUSTER_ZERO) {
                continue;
            }

            if (!preallocated) {
                if (!bs->backing_hd) {
                    /* not backed; therefore we can simply deallocate the
                     * cluster */
                    l2_table[j] = 0;
                    l2_dirty = true;
                    continue;
                }

                offset = qcow2_alloc_clusters(bs, s->cluster_size);
                if (offset < 0) {
                    ret = offset;
                    goto fail;
                }
            }

            ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            ret = bdrv_write_zeroes(bs->file, offset / BDRV_SECTOR_SIZE,
                                    s->cluster_sectors, 0);
            if (ret < 0) {
                if (!preallocated) {
                    qcow2_free_clusters(bs, offset, s->cluster_size,
                                        QCOW2_DISCARD_ALWAYS);
                }
                goto fail;
            }

            l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
            l2_dirty = true;

            cluster_index = offset >> s->cluster_bits;

            if (cluster_index >= *nb_clusters) {
                uint64_t old_bitmap_size = (*nb_clusters + 7) / 8;
                uint64_t new_bitmap_size;
                /* The offset may lie beyond the old end of the underlying image
                 * file for growable files only */
                assert(bs->file->growable);
                *nb_clusters = size_to_clusters(s, bs->file->total_sectors *
                                                BDRV_SECTOR_SIZE);
                new_bitmap_size = (*nb_clusters + 7) / 8;
                *expanded_clusters = g_realloc(*expanded_clusters,
                                               new_bitmap_size);
                /* clear the newly allocated space */
                memset(&(*expanded_clusters)[old_bitmap_size], 0,
                       new_bitmap_size - old_bitmap_size);
            }

            assert((cluster_index >= 0) && (cluster_index < *nb_clusters));
            (*expanded_clusters)[cluster_index / 8] |= 1 << (cluster_index % 8);
        }

        if (is_active_l1) {
            if (l2_dirty) {
                qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
                qcow2_cache_depends_on_flush(s->l2_table_cache);
            }
            ret = qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
            if (ret < 0) {
                l2_table = NULL;
                goto fail;
            }
        } else {
            if (l2_dirty) {
                ret = qcow2_pre_write_overlap_check(bs,
                        QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2, l2_offset,
                        s->cluster_size);
                if (ret < 0) {
                    goto fail;
                }

                ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
                        (void *)l2_table, s->cluster_sectors);
                if (ret < 0) {
                    goto fail;
                }
            }
        }
    }

    ret = 0;

fail:
    if (l2_table) {
        if (!is_active_l1) {
            qemu_vfree(l2_table);
        } else {
            if (ret < 0) {
                qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
            } else {
                ret = qcow2_cache_put(bs, s->l2_table_cache,
                        (void **)&l2_table);
            }
        }
    }
    return ret;
}
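
/*
 * The expanded_clusters bitmap is indexed by host cluster: bit (i % 8) of
 * byte (i / 8) is set once host cluster i has been filled with zeroes and
 * linked into some L2 table, so a second reference from a shared L2 table
 * only triggers a refcount update instead of another expansion.
 */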

/*
 * For backed images, expands all zero clusters on the image. For non-backed
 * images, deallocates all non-pre-allocated zero clusters (and claims the
 * allocation for pre-allocated ones). This is important for downgrading to a
 * qcow2 version which doesn't yet support metadata zero clusters.
 */
int qcow2_expand_zero_clusters(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table = NULL;
    uint64_t nb_clusters;
    uint8_t *expanded_clusters;
    int ret;
    int i, j;

    nb_clusters = size_to_clusters(s, bs->file->total_sectors *
                                   BDRV_SECTOR_SIZE);
    expanded_clusters = g_malloc0((nb_clusters + 7) / 8);

    ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
                                     &expanded_clusters, &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* Inactive L1 tables may point to active L2 tables - therefore it is
     * necessary to flush the L2 table cache before trying to access the L2
     * tables pointed to by inactive L1 entries (else we might try to expand
     * zero clusters that have already been expanded); furthermore, it is also
     * necessary to empty the L2 table cache, since it may contain tables which
     * are now going to be modified directly on disk, bypassing the cache.
     * qcow2_cache_empty() does both for us. */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        int l1_sectors = (s->snapshots[i].l1_size * sizeof(uint64_t) +
                BDRV_SECTOR_SIZE - 1) / BDRV_SECTOR_SIZE;

        l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);

        ret = bdrv_read(bs->file, s->snapshots[i].l1_table_offset /
                BDRV_SECTOR_SIZE, (void *)l1_table, l1_sectors);
        if (ret < 0) {
            goto fail;
        }

        for (j = 0; j < s->snapshots[i].l1_size; j++) {
            be64_to_cpus(&l1_table[j]);
        }

        ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
                                         &expanded_clusters, &nb_clusters);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = 0;

fail:
    g_free(expanded_clusters);
    g_free(l1_table);
    return ret;
}